These enums are essentially the same and always sorted in the same
order in every hypervisor with jobs. Generalize them by making the
QEMU enums, which are the most extensive, the common hypervisor-level
ones.
Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
---
src/hypervisor/domain_job.c | 32 +++
src/hypervisor/domain_job.h | 52 +++++
src/hypervisor/meson.build | 1 +
src/libvirt_private.syms | 5 +
src/libxl/libxl_domain.c | 1 +
src/qemu/MIGRATION.txt | 6 +-
src/qemu/THREADS.txt | 16 +-
src/qemu/qemu_backup.c | 22 +-
src/qemu/qemu_block.c | 20 +-
src/qemu/qemu_block.h | 12 +-
src/qemu/qemu_blockjob.c | 32 +--
src/qemu/qemu_checkpoint.c | 18 +-
src/qemu/qemu_domain.c | 26 +--
src/qemu/qemu_domain.h | 4 +-
src/qemu/qemu_domainjob.c | 236 ++++++++++------------
src/qemu/qemu_domainjob.h | 85 ++------
src/qemu/qemu_driver.c | 332 +++++++++++++++----------------
src/qemu/qemu_hotplug.c | 50 ++---
src/qemu/qemu_hotplug.h | 10 +-
src/qemu/qemu_migration.c | 218 ++++++++++----------
src/qemu/qemu_migration.h | 8 +-
src/qemu/qemu_migration_params.c | 4 +-
src/qemu/qemu_process.c | 188 ++++++++---------
src/qemu/qemu_process.h | 22 +-
src/qemu/qemu_saveimage.c | 4 +-
src/qemu/qemu_saveimage.h | 4 +-
src/qemu/qemu_snapshot.c | 56 +++---
src/qemu/qemu_snapshot.h | 2 +-
28 files changed, 739 insertions(+), 727 deletions(-)
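
[Note for reviewers, not part of the patch: the rename is mechanical.
A minimal sketch of what a caller looks like after this change — the
function name exampleBeginModify is hypothetical, the helpers and
their signatures are the ones touched by this patch:

    static void
    exampleBeginModify(virQEMUDriver *driver, virDomainObj *vm)
    {
        /* was: qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) */
        if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
            return;

        ...do work...

        qemuDomainObjEndJob(vm);
    }

Only the enum spelling changes; the qemuDomainObj* helpers keep their
names and signatures.]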
diff --git a/src/hypervisor/domain_job.c b/src/hypervisor/domain_job.c
index 9ac8a6d544..ff4e008cb5 100644
--- a/src/hypervisor/domain_job.c
+++ b/src/hypervisor/domain_job.c
@@ -9,6 +9,38 @@
#include "domain_job.h"
+VIR_ENUM_IMPL(virDomainJob,
+ VIR_JOB_LAST,
+ "none",
+ "query",
+ "destroy",
+ "suspend",
+ "modify",
+ "abort",
+ "migration operation",
+ "none", /* async job is never stored in job.active */
+ "async nested",
+);
+
+VIR_ENUM_IMPL(virDomainAgentJob,
+ VIR_AGENT_JOB_LAST,
+ "none",
+ "query",
+ "modify",
+);
+
+VIR_ENUM_IMPL(virDomainAsyncJob,
+ VIR_ASYNC_JOB_LAST,
+ "none",
+ "migration out",
+ "migration in",
+ "save",
+ "dump",
+ "snapshot",
+ "start",
+ "backup",
+);
+
virDomainJobData *
virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb)
{
diff --git a/src/hypervisor/domain_job.h b/src/hypervisor/domain_job.h
index 257ef067e4..b9d1107580 100644
--- a/src/hypervisor/domain_job.h
+++ b/src/hypervisor/domain_job.h
@@ -6,6 +6,58 @@
#pragma once
#include "internal.h"
+#include "virenum.h"
+
+/* Only 1 job is allowed at any time
+ * A job includes *all* monitor commands, even those just querying
+ * information, not merely actions */
+typedef enum {
+ VIR_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
+ VIR_JOB_QUERY, /* Doesn't change any state */
+ VIR_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
+ VIR_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
+ VIR_JOB_MODIFY, /* May change state */
+ VIR_JOB_ABORT, /* Abort current async job */
+ VIR_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
+
+ /* The following two items must always be the last items before JOB_LAST */
+ VIR_JOB_ASYNC, /* Asynchronous job */
+ VIR_JOB_ASYNC_NESTED, /* Normal job within an async job */
+
+ VIR_JOB_LAST
+} virDomainJob;
+VIR_ENUM_DECL(virDomainJob);
+
+
+/* Currently only QEMU driver uses agent jobs */
+typedef enum {
+ VIR_AGENT_JOB_NONE = 0, /* No agent job. */
+ VIR_AGENT_JOB_QUERY, /* Does not change state of domain */
+ VIR_AGENT_JOB_MODIFY, /* May change state of domain */
+
+ VIR_AGENT_JOB_LAST
+} virDomainAgentJob;
+VIR_ENUM_DECL(virDomainAgentJob);
+
+
+/* Async job consists of a series of jobs that may change state. Independent
+ * jobs that do not change state (and possibly others if explicitly allowed by
+ * current async job) are allowed to be run even if async job is active.
+ * Currently supported by QEMU only. */
+typedef enum {
+ VIR_ASYNC_JOB_NONE = 0,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_SAVE,
+ VIR_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_SNAPSHOT,
+ VIR_ASYNC_JOB_START,
+ VIR_ASYNC_JOB_BACKUP,
+
+ VIR_ASYNC_JOB_LAST
+} virDomainAsyncJob;
+VIR_ENUM_DECL(virDomainAsyncJob);
+
typedef enum {
VIR_DOMAIN_JOB_STATUS_NONE = 0,
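
[Note for reviewers: the VIR_ENUM_IMPL string tables above are identical
to the old qemuDomainJob/qemuDomainAgentJob/qemuDomainAsyncJob tables, so
job types recorded in status XML round-trip unchanged. A small sketch,
assuming the usual virenum.h ToString/FromString semantics:

    int job = virDomainJobTypeFromString("modify");  /* == VIR_JOB_MODIFY */
    const char *name = virDomainAsyncJobTypeToString(VIR_ASYNC_JOB_MIGRATION_OUT);
    /* name == "migration out", exactly what the qemu-prefixed table produced */
]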
diff --git a/src/hypervisor/meson.build b/src/hypervisor/meson.build
index ec11ec0cd8..7532f30ee2 100644
--- a/src/hypervisor/meson.build
+++ b/src/hypervisor/meson.build
@@ -19,6 +19,7 @@ hypervisor_lib = static_library(
],
include_directories: [
conf_inc_dir,
+ util_inc_dir,
],
)
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 03697d81a8..8a3e5f7f7c 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1577,10 +1577,15 @@ virDomainDriverSetupPersistentDefBlkioParams;
# hypervisor/domain_job.h
+virDomainAgentJobTypeToString;
+virDomainAsyncJobTypeFromString;
+virDomainAsyncJobTypeToString;
virDomainJobDataCopy;
virDomainJobDataFree;
virDomainJobDataInit;
virDomainJobStatusToType;
+virDomainJobTypeFromString;
+virDomainJobTypeToString;
# hypervisor/virclosecallbacks.h
diff --git a/src/libxl/libxl_domain.c b/src/libxl/libxl_domain.c
index d33e3811d1..2501f6b848 100644
--- a/src/libxl/libxl_domain.c
+++ b/src/libxl/libxl_domain.c
@@ -38,6 +38,7 @@
#include "xen_common.h"
#include "driver.h"
#include "domain_validate.h"
+#include "domain_job.h"
#define VIR_FROM_THIS VIR_FROM_LIBXL
diff --git a/src/qemu/MIGRATION.txt b/src/qemu/MIGRATION.txt
index e861fd001e..b75fe62788 100644
--- a/src/qemu/MIGRATION.txt
+++ b/src/qemu/MIGRATION.txt
@@ -73,14 +73,14 @@ The sequence of calling qemuMigrationJob* helper methods is as follows:
- The first API of a migration protocol (Prepare or Perform/Begin depending on
migration type and version) has to start migration job and keep it active:
- qemuMigrationJobStart(driver, vm, QEMU_JOB_MIGRATION_{IN,OUT});
+ qemuMigrationJobStart(driver, vm, VIR_JOB_MIGRATION_{IN,OUT});
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
...do work...
qemuMigrationJobContinue(vm);
- All consequent phases except for the last one have to keep the job active:
- if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+ if (!qemuMigrationJobIsActive(vm, VIR_JOB_MIGRATION_{IN,OUT}))
return;
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
...do work...
@@ -88,7 +88,7 @@ The sequence of calling qemuMigrationJob* helper methods is as follows:
- The last migration phase finally finishes the migration job:
- if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+ if (!qemuMigrationJobIsActive(vm, VIR_JOB_MIGRATION_{IN,OUT}))
return;
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
...do work...
diff --git a/src/qemu/THREADS.txt b/src/qemu/THREADS.txt
index 30cf3ce210..b5f54f203c 100644
--- a/src/qemu/THREADS.txt
+++ b/src/qemu/THREADS.txt
@@ -186,7 +186,7 @@ To acquire the QEMU monitor lock as part of an asynchronous job
These functions are for use inside an asynchronous job; the caller
must check for a return of -1 (VM not running, so nothing to exit).
- Helper functions may also call this with QEMU_ASYNC_JOB_NONE when
+ Helper functions may also call this with VIR_ASYNC_JOB_NONE when
used from a sync job (such as when first starting a domain).
@@ -220,7 +220,7 @@ Design patterns
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
+ qemuDomainObjBeginJob(obj, VIR_JOB_TYPE);
...do work...
@@ -236,7 +236,7 @@ Design patterns
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginJob(obj, QEMU_JOB_TYPE);
+ qemuDomainObjBeginJob(obj, VIR_JOB_TYPE);
...do prep work...
@@ -259,7 +259,7 @@ Design patterns
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginAgentJob(obj, QEMU_AGENT_JOB_TYPE);
+ qemuDomainObjBeginAgentJob(obj, VIR_AGENT_JOB_TYPE);
...do prep work...
@@ -283,13 +283,13 @@ Design patterns
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
+ qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
qemuDomainObjSetAsyncJobMask(obj, allowedJobs);
...do prep work...
if (qemuDomainObjEnterMonitorAsync(driver, obj,
- QEMU_ASYNC_JOB_TYPE) < 0) {
+ VIR_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */
goto error;
}
@@ -298,7 +298,7 @@ Design patterns
while (!finished) {
if (qemuDomainObjEnterMonitorAsync(driver, obj,
- QEMU_ASYNC_JOB_TYPE) < 0) {
+ VIR_ASYNC_JOB_TYPE) < 0) {
/* domain died in the meantime */
goto error;
}
@@ -323,7 +323,7 @@ Design patterns
obj = qemuDomObjFromDomain(dom);
- qemuDomainObjBeginAsyncJob(obj, QEMU_ASYNC_JOB_TYPE);
+ qemuDomainObjBeginAsyncJob(obj, VIR_ASYNC_JOB_TYPE);
...do prep work...
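
[Note for reviewers: a condensed sketch of the async-job pattern above
with the new names, error paths elided; the save job and its operation
constant are just one example:

    if (qemuDomainObjBeginAsyncJob(driver, obj, VIR_ASYNC_JOB_SAVE,
                                   VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
        return -1;

    qemuDomainObjSetAsyncJobMask(obj, QEMU_JOB_DEFAULT_MASK);

    if (qemuDomainObjEnterMonitorAsync(driver, obj, VIR_ASYNC_JOB_SAVE) < 0)
        goto error;   /* domain died in the meantime */
    ...issue monitor commands...
    qemuDomainObjExitMonitor(obj);

    qemuDomainObjEndAsyncJob(obj);
]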
diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c
index f31b840617..5d24155628 100644
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -466,10 +466,10 @@ qemuBackupDiskPrepareOneStorage(virDomainObj *vm,
if (qemuBlockStorageSourceCreate(vm, dd->store, dd->backingStore, NULL,
dd->crdata->srcdata[0],
- QEMU_ASYNC_JOB_BACKUP) < 0)
+ VIR_ASYNC_JOB_BACKUP) < 0)
return -1;
} else {
-        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
return -1;
rc = qemuBlockStorageSourceAttachApply(priv->mon,
dd->crdata->srcdata[0]);
@@ -622,7 +622,7 @@ qemuBackupJobTerminate(virDomainObj *vm,
g_clear_pointer(&priv->backup, virDomainBackupDefFree);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_BACKUP)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_BACKUP)
qemuDomainObjEndAsyncJob(vm);
}
@@ -791,13 +791,13 @@ qemuBackupBegin(virDomainObj *vm,
* infrastructure for async jobs. We'll allow standard modify-type jobs
* as the interlocking of conflicting operations is handled on the block
* job level */
- if (qemuDomainObjBeginAsyncJob(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP,
+ if (qemuDomainObjBeginAsyncJob(priv->driver, vm, VIR_ASYNC_JOB_BACKUP,
VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0)
return -1;
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MODIFY)));
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MODIFY)));
qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
@@ -856,7 +856,7 @@ qemuBackupBegin(virDomainObj *vm,
goto endjob;
}
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_BACKUP)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_BACKUP)))
goto endjob;
if ((ndd = qemuBackupDiskPrepareData(vm, def, blockNamedNodeData, actions,
@@ -874,7 +874,7 @@ qemuBackupBegin(virDomainObj *vm,
priv->backup = g_steal_pointer(&def);
-    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
goto endjob;
if (pull) {
@@ -910,7 +910,7 @@ qemuBackupBegin(virDomainObj *vm,
}
if (pull) {
-        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+        if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) < 0)
goto endjob;
/* note that if the export fails we've already created the checkpoint
* and we will not delete it */
@@ -918,7 +918,7 @@ qemuBackupBegin(virDomainObj *vm,
qemuDomainObjExitMonitor(vm);
if (rc < 0) {
-            qemuBackupJobCancelBlockjobs(vm, priv->backup, false, QEMU_ASYNC_JOB_BACKUP);
+            qemuBackupJobCancelBlockjobs(vm, priv->backup, false, VIR_ASYNC_JOB_BACKUP);
goto endjob;
}
}
@@ -932,7 +932,7 @@ qemuBackupBegin(virDomainObj *vm,
qemuCheckpointRollbackMetadata(vm, chk);
if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) &&
-        qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) == 0) {
+        qemuDomainObjEnterMonitorAsync(priv->driver, vm, VIR_ASYNC_JOB_BACKUP) == 0) {
if (nbd_running)
ignore_value(qemuMonitorNBDServerStop(priv->mon));
if (tlsAlias)
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index f70b6d3e63..3d961c8b39 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -308,7 +308,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDef *disk,
int
qemuBlockNodeNamesDetect(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) disktable = NULL;
@@ -2120,7 +2120,7 @@ qemuBlockStorageSourceChainDetach(qemuMonitor *mon,
int
qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSource *src)
{
int ret;
@@ -2694,7 +2694,7 @@ qemuBlockStorageSourceCreateGeneric(virDomainObj *vm,
virStorageSource *src,
virStorageSource *chain,
bool storageCreate,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virJSONValue) props = createProps;
qemuDomainObjPrivate *priv = vm->privateData;
@@ -2749,7 +2749,7 @@ static int
qemuBlockStorageSourceCreateStorage(virDomainObj *vm,
virStorageSource *src,
virStorageSource *chain,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int actualType = virStorageSourceGetActualType(src);
g_autoptr(virJSONValue) createstorageprops = NULL;
@@ -2786,7 +2786,7 @@ qemuBlockStorageSourceCreateFormat(virDomainObj *vm,
virStorageSource *src,
virStorageSource *backingStore,
virStorageSource *chain,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virJSONValue) createformatprops = NULL;
int ret;
@@ -2836,7 +2836,7 @@ qemuBlockStorageSourceCreate(virDomainObj *vm,
virStorageSource *backingStore,
virStorageSource *chain,
qemuBlockStorageSourceAttachData *data,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret = -1;
@@ -3020,7 +3020,7 @@ qemuBlockNamedNodeDataGetBitmapByName(GHashTable *blockNamedNodeData,
GHashTable *
qemuBlockGetNamedNodeData(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
@@ -3372,7 +3372,7 @@ qemuBlockReopenFormatMon(qemuMonitor *mon,
static int
qemuBlockReopenFormat(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
@@ -3413,7 +3413,7 @@ qemuBlockReopenFormat(virDomainObj *vm,
int
qemuBlockReopenReadWrite(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
if (!src->readonly)
return 0;
@@ -3442,7 +3442,7 @@ qemuBlockReopenReadWrite(virDomainObj *vm,
int
qemuBlockReopenReadOnly(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
if (src->readonly)
return 0;
diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h
index 184a549d5c..8eafb8482a 100644
--- a/src/qemu/qemu_block.h
+++ b/src/qemu/qemu_block.h
@@ -47,7 +47,7 @@ qemuBlockNodeNameGetBackingChain(virJSONValue *namednodesdata,
int
qemuBlockNodeNamesDetect(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
GHashTable *
qemuBlockGetNodeData(virJSONValue *data);
@@ -143,7 +143,7 @@ qemuBlockStorageSourceAttachRollback(qemuMonitor *mon,
int
qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSource *src);
struct _qemuBlockStorageSourceChainData {
@@ -213,7 +213,7 @@ qemuBlockStorageSourceCreate(virDomainObj *vm,
virStorageSource *backingStore,
virStorageSource *chain,
qemuBlockStorageSourceAttachData *data,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockStorageSourceCreateDetectSize(GHashTable *blockNamedNodeData,
@@ -233,7 +233,7 @@ qemuBlockNamedNodeDataGetBitmapByName(GHashTable *blockNamedNodeData,
GHashTable *
qemuBlockGetNamedNodeData(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockGetBitmapMergeActions(virStorageSource *topsrc,
@@ -272,11 +272,11 @@ qemuBlockReopenFormatMon(qemuMonitor *mon,
int
qemuBlockReopenReadWrite(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
qemuBlockReopenReadOnly(virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
bool
qemuBlockStorageSourceNeedsStorageSliceLayer(const virStorageSource *src);
diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 87f8ae7b52..8c2205118f 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -565,7 +565,7 @@ qemuBlockJobRefreshJobs(virQEMUDriver *driver,
job->reconnected = true;
if (job->newstate != -1)
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
/* 'job' may be invalid after this update */
}
@@ -839,7 +839,7 @@ qemuBlockJobEventProcessLegacy(virQEMUDriver *driver,
static void
qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virStorageSource *chain)
{
g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
@@ -942,7 +942,7 @@ qemuBlockJobClearConfigChain(virDomainObj *vm,
static int
qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
@@ -992,7 +992,7 @@ static void
qemuBlockJobProcessEventCompletedPull(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSource *base = NULL;
virStorageSource *baseparent = NULL;
@@ -1106,7 +1106,7 @@ qemuBlockJobDeleteImages(virQEMUDriver *driver,
static int
qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
@@ -1168,7 +1168,7 @@ static void
qemuBlockJobProcessEventCompletedCommit(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSource *baseparent = NULL;
virDomainDiskDef *cfgdisk = NULL;
@@ -1258,7 +1258,7 @@ static void
qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
virStorageSource *baseparent = NULL;
virDomainDiskDef *cfgdisk = NULL;
@@ -1329,7 +1329,7 @@ qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriver *driver,
static int
qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
@@ -1366,7 +1366,7 @@ static void
qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name,
vm->def->name);
@@ -1402,7 +1402,7 @@ static void
qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -1438,7 +1438,7 @@ static void
qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainDiskDef *disk = job->disk;
@@ -1470,7 +1470,7 @@ static void
qemuBlockJobProcessEventConcludedCreate(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;
@@ -1511,7 +1511,7 @@ static void
qemuBlockJobProcessEventConcludedBackup(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuBlockjobState newstate,
unsigned long long progressCurrent,
unsigned long long progressTotal)
@@ -1547,7 +1547,7 @@ static void
qemuBlockJobEventProcessConcludedTransition(qemuBlockJobData *job,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned long long progressCurrent,
unsigned long long progressTotal)
{
@@ -1607,7 +1607,7 @@ static void
qemuBlockJobEventProcessConcluded(qemuBlockJobData *job,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuMonitorJobInfo **jobinfo = NULL;
size_t njobinfo = 0;
@@ -1688,7 +1688,7 @@ static void
qemuBlockJobEventProcess(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
switch ((qemuBlockjobState) job->newstate) {
diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c
index 2a495dfe08..a933230335 100644
--- a/src/qemu/qemu_checkpoint.c
+++ b/src/qemu/qemu_checkpoint.c
@@ -192,7 +192,7 @@ qemuCheckpointDiscardBitmaps(virDomainObj *vm,
actions = virJSONValueNewArray();
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
return -1;
for (i = 0; i < chkdef->ndisks; i++) {
@@ -229,7 +229,7 @@ qemuCheckpointDiscardBitmaps(virDomainObj *vm,
goto relabel;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN) &&
- qemuBlockReopenReadWrite(vm, src, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockReopenReadWrite(vm, src, VIR_ASYNC_JOB_NONE) < 0)
goto relabel;
relabelimages = g_slist_prepend(relabelimages, src);
@@ -244,7 +244,7 @@ qemuCheckpointDiscardBitmaps(virDomainObj *vm,
virStorageSource *src = next->data;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
- ignore_value(qemuBlockReopenReadOnly(vm, src, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuBlockReopenReadOnly(vm, src, VIR_ASYNC_JOB_NONE));
ignore_value(qemuDomainStorageSourceAccessAllow(driver, vm, src,
true, false, false));
@@ -417,7 +417,7 @@ qemuCheckpointRedefineValidateBitmaps(virDomainObj *vm,
if (virDomainObjCheckActive(vm) < 0)
return -1;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
return -1;
for (i = 0; i < chkdef->ndisks; i++) {
@@ -607,7 +607,7 @@ qemuCheckpointCreateXML(virDomainPtr domain,
/* Unlike snapshots, the RNG schema already ensured a sane filename. */
/* We are going to modify the domain below. */
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return NULL;
if (redefine) {
@@ -658,13 +658,13 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObj *vm,
size_t i;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (!(nodedataMerge = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(nodedataMerge = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
/* enumerate disks relevant for the checkpoint which are also present in the
@@ -741,7 +741,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObj *vm,
goto endjob;
/* now do a final refresh */
- if (!(nodedataStats = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(nodedataStats = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
qemuDomainObjEnterMonitor(driver, vm);
@@ -852,7 +852,7 @@ qemuCheckpointDelete(virDomainObj *vm,
VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY |
VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (!metadata_only) {
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 3bf864bc5d..a587ebe86d 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -210,7 +210,7 @@ qemuDomainFormatJobPrivate(virBuffer *buf,
{
qemuDomainJobPrivate *priv = job->privateData;
- if (job->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
if (qemuDomainObjPrivateXMLFormatNBDMigration(buf, vm) < 0)
return -1;
@@ -284,7 +284,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObj *vm,
return -1;
if (n > 0) {
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_WARN("Found disks marked for migration but we were not "
"migrating");
n = 0;
@@ -5858,11 +5858,11 @@ qemuDomainSaveConfig(virDomainObj *obj)
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = obj->privateData;
- if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob != VIR_ASYNC_JOB_NONE) {
int ret;
if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
return ret;
@@ -5878,7 +5878,7 @@ qemuDomainObjEnterMonitorInternal(virQEMUDriver *driver,
} else if (priv->job.owner != virThreadSelfID()) {
VIR_WARN("Entering a monitor without owning a job. "
"Job %s owner %s (%llu)",
- qemuDomainJobTypeToString(priv->job.active),
+ virDomainJobTypeToString(priv->job.active),
priv->job.ownerAPI, priv->job.owner);
}
@@ -5918,7 +5918,7 @@ qemuDomainObjExitMonitor(virDomainObj *obj)
if (!hasRefs)
priv->mon = NULL;
- if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ if (priv->job.active == VIR_JOB_ASYNC_NESTED)
qemuDomainObjEndJob(obj);
}
@@ -5926,7 +5926,7 @@ void qemuDomainObjEnterMonitor(virQEMUDriver *driver,
virDomainObj *obj)
{
ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
- QEMU_ASYNC_JOB_NONE));
+ VIR_ASYNC_JOB_NONE));
}
/*
@@ -5935,7 +5935,7 @@ void qemuDomainObjEnterMonitor(virQEMUDriver *driver,
* To be called immediately before any QEMU monitor API call.
* Must have already either called qemuDomainObjBeginJob()
* and checked that the VM is still active, with asyncJob of
- * QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
+ * VIR_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
* with the same asyncJob.
*
* Returns 0 if job was started, in which case this must be followed with
@@ -5946,7 +5946,7 @@ void qemuDomainObjEnterMonitor(virQEMUDriver *driver,
int
qemuDomainObjEnterMonitorAsync(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob);
}
@@ -7135,7 +7135,7 @@ qemuDomainRemoveInactiveLocked(virQEMUDriver *driver,
* qemuDomainRemoveInactiveJob:
*
* Just like qemuDomainRemoveInactive but it tries to grab a
- * QEMU_JOB_MODIFY first. Even though it doesn't succeed in
+ * VIR_JOB_MODIFY first. Even though it doesn't succeed in
* grabbing the job the control carries with
* qemuDomainRemoveInactive call.
*/
@@ -7145,7 +7145,7 @@ qemuDomainRemoveInactiveJob(virQEMUDriver *driver,
{
bool haveJob;
- haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
+ haveJob = qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) >= 0;
qemuDomainRemoveInactive(driver, vm);
@@ -7166,7 +7166,7 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriver *driver,
{
bool haveJob;
- haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
+ haveJob = qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) >= 0;
qemuDomainRemoveInactiveLocked(driver, vm);
@@ -10071,7 +10071,7 @@ qemuDomainVcpuPersistOrder(virDomainDef *def)
int
qemuDomainCheckMonitor(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret;
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index edafb585b3..a5d6705571 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -500,7 +500,7 @@ void qemuDomainObjExitMonitor(virDomainObj *obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainObjEnterMonitorAsync(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
@@ -892,7 +892,7 @@ void qemuDomainVcpuPersistOrder(virDomainDef *def)
int qemuDomainCheckMonitor(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
bool qemuDomainSupportsVideoVga(const virDomainVideoDef *video,
virQEMUCaps *qemuCaps);
diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c
index cf1e093e22..71876fe6a3 100644
--- a/src/qemu/qemu_domainjob.c
+++ b/src/qemu/qemu_domainjob.c
@@ -31,38 +31,6 @@
VIR_LOG_INIT("qemu.qemu_domainjob");
-VIR_ENUM_IMPL(qemuDomainJob,
- QEMU_JOB_LAST,
- "none",
- "query",
- "destroy",
- "suspend",
- "modify",
- "abort",
- "migration operation",
- "none", /* async job is never stored in job.active */
- "async nested",
-);
-
-VIR_ENUM_IMPL(qemuDomainAgentJob,
- QEMU_AGENT_JOB_LAST,
- "none",
- "query",
- "modify",
-);
-
-VIR_ENUM_IMPL(qemuDomainAsyncJob,
- QEMU_ASYNC_JOB_LAST,
- "none",
- "migration out",
- "migration in",
- "save",
- "dump",
- "snapshot",
- "start",
- "backup",
-);
-
static void *
qemuJobDataAllocPrivateData(void)
{
@@ -106,22 +74,22 @@ qemuDomainJobSetStatsType(virDomainJobData *jobData,
const char *
-qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
int phase G_GNUC_UNUSED)
{
switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeToString(phase);
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_BACKUP:
G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
break;
}
@@ -129,25 +97,25 @@ qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
}
int
-qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
const char *phase)
{
if (!phase)
return 0;
switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeFromString(phase);
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_BACKUP:
G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
break;
}
@@ -211,7 +179,7 @@ qemuDomainObjInitJob(qemuDomainJobObj *job,
static void
qemuDomainObjResetJob(qemuDomainJobObj *job)
{
- job->active = QEMU_JOB_NONE;
+ job->active = VIR_JOB_NONE;
job->owner = 0;
g_clear_pointer(&job->ownerAPI, g_free);
job->started = 0;
@@ -221,7 +189,7 @@ qemuDomainObjResetJob(qemuDomainJobObj *job)
static void
qemuDomainObjResetAgentJob(qemuDomainJobObj *job)
{
- job->agentActive = QEMU_AGENT_JOB_NONE;
+ job->agentActive = VIR_AGENT_JOB_NONE;
job->agentOwner = 0;
g_clear_pointer(&job->agentOwnerAPI, g_free);
job->agentStarted = 0;
@@ -231,7 +199,7 @@ qemuDomainObjResetAgentJob(qemuDomainJobObj *job)
static void
qemuDomainObjResetAsyncJob(qemuDomainJobObj *job)
{
- job->asyncJob = QEMU_ASYNC_JOB_NONE;
+ job->asyncJob = VIR_ASYNC_JOB_NONE;
job->asyncOwner = 0;
g_clear_pointer(&job->asyncOwnerAPI, g_free);
job->asyncStarted = 0;
@@ -286,7 +254,7 @@ qemuDomainObjClearJob(qemuDomainJobObj *job)
}
bool
-qemuDomainTrackJob(qemuDomainJob job)
+qemuDomainTrackJob(virDomainJob job)
{
return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
}
@@ -713,14 +681,14 @@ qemuDomainObjSetJobPhase(virDomainObj *obj,
return;
VIR_DEBUG("Setting '%s' phase to '%s'",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
if (priv->job.asyncOwner == 0) {
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
} else if (me != priv->job.asyncOwner) {
VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.asyncOwner);
}
@@ -738,7 +706,7 @@ qemuDomainObjSetAsyncJobMask(virDomainObj *obj,
if (!priv->job.asyncJob)
return;
- priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
+ priv->job.mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
}
void
@@ -746,7 +714,7 @@ qemuDomainObjDiscardAsyncJob(virDomainObj *obj)
{
qemuDomainObjPrivate *priv = obj->privateData;
- if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ if (priv->job.active == VIR_JOB_ASYNC_NESTED)
qemuDomainObjResetJob(&priv->job);
qemuDomainObjResetAsyncJob(&priv->job);
qemuDomainSaveStatus(obj);
@@ -758,33 +726,33 @@ qemuDomainObjReleaseAsyncJob(virDomainObj *obj)
qemuDomainObjPrivate *priv = obj->privateData;
VIR_DEBUG("Releasing ownership of '%s' async job",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.asyncOwner != virThreadSelfID()) {
VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.asyncOwner);
}
priv->job.asyncOwner = 0;
}
static bool
-qemuDomainNestedJobAllowed(qemuDomainJobObj *jobs, qemuDomainJob newJob)
+qemuDomainNestedJobAllowed(qemuDomainJobObj *jobs, virDomainJob newJob)
{
return !jobs->asyncJob ||
- newJob == QEMU_JOB_NONE ||
+ newJob == VIR_JOB_NONE ||
(jobs->mask & JOB_MASK(newJob)) != 0;
}
static bool
qemuDomainObjCanSetJob(qemuDomainJobObj *job,
- qemuDomainJob newJob,
- qemuDomainAgentJob newAgentJob)
+ virDomainJob newJob,
+ virDomainAgentJob newAgentJob)
{
- return ((newJob == QEMU_JOB_NONE ||
- job->active == QEMU_JOB_NONE) &&
- (newAgentJob == QEMU_AGENT_JOB_NONE ||
- job->agentActive == QEMU_AGENT_JOB_NONE));
+ return ((newJob == VIR_JOB_NONE ||
+ job->active == VIR_JOB_NONE) &&
+ (newAgentJob == VIR_AGENT_JOB_NONE ||
+ job->agentActive == VIR_AGENT_JOB_NONE));
}
/* Give up waiting for mutex after 30 seconds */
@@ -794,8 +762,8 @@ qemuDomainObjCanSetJob(qemuDomainJobObj *job,
* qemuDomainObjBeginJobInternal:
* @driver: qemu driver
* @obj: domain object
- * @job: qemuDomainJob to start
- * @asyncJob: qemuDomainAsyncJob to start
+ * @job: virDomainJob to start
+ * @asyncJob: virDomainAsyncJob to start
* @nowait: don't wait trying to acquire @job
*
* Acquires job for a domain object which must be locked before
@@ -815,16 +783,16 @@ qemuDomainObjCanSetJob(qemuDomainJobObj *job,
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job,
- qemuDomainAgentJob agentJob,
- qemuDomainAsyncJob asyncJob,
+ virDomainJob job,
+ virDomainAgentJob agentJob,
+ virDomainAsyncJob asyncJob,
bool nowait)
{
qemuDomainObjPrivate *priv = obj->privateData;
unsigned long long now;
unsigned long long then;
- bool nested = job == QEMU_JOB_ASYNC_NESTED;
- bool async = job == QEMU_JOB_ASYNC;
+ bool nested = job == VIR_JOB_ASYNC_NESTED;
+ bool async = job == VIR_JOB_ASYNC;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
const char *blocker = NULL;
const char *agentBlocker = NULL;
@@ -837,13 +805,13 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
VIR_DEBUG("Starting job: API=%s job=%s agentJob=%s asyncJob=%s "
"(vm=%p name=%s, current job=%s agentJob=%s async=%s)",
NULLSTR(currentAPI),
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAgentJobTypeToString(priv->job.agentActive),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAgentJobTypeToString(priv->job.agentActive),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
if (virTimeMillisNow(&now) < 0)
return -1;
@@ -852,7 +820,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
then = now + QEMU_JOB_WAIT_TIME;
retry:
- if ((!async && job != QEMU_JOB_DESTROY) &&
+ if ((!async && job != VIR_JOB_DESTROY) &&
cfg->maxQueuedJobs &&
priv->job.jobsQueued > cfg->maxQueuedJobs) {
goto error;
@@ -886,10 +854,10 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
if (job) {
qemuDomainObjResetJob(&priv->job);
- if (job != QEMU_JOB_ASYNC) {
+ if (job != VIR_JOB_ASYNC) {
VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.active = job;
priv->job.owner = virThreadSelfID();
@@ -897,7 +865,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
priv->job.started = now;
} else {
VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(&priv->job);
             priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
@@ -914,10 +882,10 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
qemuDomainObjResetAgentJob(&priv->job);
VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
+ virDomainAgentJobTypeToString(agentJob),
obj, obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
priv->job.agentActive = agentJob;
priv->job.agentOwner = virThreadSelfID();
priv->job.agentOwnerAPI = g_strdup(virThreadJobGet());
@@ -942,14 +910,14 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
"current job is (%s, %s, %s) "
"owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) "
"for (%llus, %llus, %llus)",
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(asyncJob),
NULLSTR(currentAPI),
obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAgentJobTypeToString(priv->job.agentActive),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAgentJobTypeToString(priv->job.agentActive),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.owner, NULLSTR(priv->job.ownerAPI),
priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI),
priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
@@ -1032,11 +1000,11 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
*/
int qemuDomainObjBeginJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
{
if (qemuDomainObjBeginJobInternal(driver, obj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, false) < 0)
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
return 0;
}
@@ -1051,23 +1019,23 @@ int qemuDomainObjBeginJob(virQEMUDriver *driver,
int
qemuDomainObjBeginAgentJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAgentJob agentJob)
+ virDomainAgentJob agentJob)
{
- return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE,
+ return qemuDomainObjBeginJobInternal(driver, obj, VIR_JOB_NONE,
agentJob,
- QEMU_ASYNC_JOB_NONE, false);
+ VIR_ASYNC_JOB_NONE, false);
}
int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobOperation operation,
unsigned long apiFlags)
{
qemuDomainObjPrivate *priv;
- if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
- QEMU_AGENT_JOB_NONE,
+ if (qemuDomainObjBeginJobInternal(driver, obj, VIR_JOB_ASYNC,
+ VIR_AGENT_JOB_NONE,
asyncJob, false) < 0)
return -1;
@@ -1080,7 +1048,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
int
qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = obj->privateData;
@@ -1097,9 +1065,9 @@ qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
}
return qemuDomainObjBeginJobInternal(driver, obj,
- QEMU_JOB_ASYNC_NESTED,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE,
+ VIR_JOB_ASYNC_NESTED,
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE,
false);
}
@@ -1108,7 +1076,7 @@ qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
*
* @driver: qemu driver
* @obj: domain object
- * @job: qemuDomainJob to start
+ * @job: virDomainJob to start
*
* Acquires job for a domain object which must be locked before
* calling. If there's already a job running it returns
@@ -1119,11 +1087,11 @@ qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
int
qemuDomainObjBeginJobNowait(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
{
return qemuDomainObjBeginJobInternal(driver, obj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, true);
+ VIR_AGENT_JOB_NONE,
+ VIR_ASYNC_JOB_NONE, true);
}
/*
@@ -1136,13 +1104,13 @@ void
qemuDomainObjEndJob(virDomainObj *obj)
{
qemuDomainObjPrivate *priv = obj->privateData;
- qemuDomainJob job = priv->job.active;
+ virDomainJob job = priv->job.active;
priv->job.jobsQueued--;
VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetJob(&priv->job);
@@ -1157,13 +1125,13 @@ void
qemuDomainObjEndAgentJob(virDomainObj *obj)
{
qemuDomainObjPrivate *priv = obj->privateData;
- qemuDomainAgentJob agentJob = priv->job.agentActive;
+ virDomainAgentJob agentJob = priv->job.agentActive;
priv->job.jobsQueued--;
VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAgentJobTypeToString(agentJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetAgentJob(&priv->job);
@@ -1180,7 +1148,7 @@ qemuDomainObjEndAsyncJob(virDomainObj *obj)
priv->job.jobsQueued--;
VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(&priv->job);
@@ -1194,7 +1162,7 @@ qemuDomainObjAbortAsyncJob(virDomainObj *obj)
qemuDomainObjPrivate *priv = obj->privateData;
VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.abortJob = true;
@@ -1208,26 +1176,26 @@ qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
qemuDomainObjPrivate *priv = vm->privateData;
g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
- qemuDomainJob job = priv->job.active;
+ virDomainJob job = priv->job.active;
if (!qemuDomainTrackJob(job))
- job = QEMU_JOB_NONE;
+ job = VIR_JOB_NONE;
- if (job == QEMU_JOB_NONE &&
- priv->job.asyncJob == QEMU_ASYNC_JOB_NONE)
+ if (job == VIR_JOB_NONE &&
+ priv->job.asyncJob == VIR_ASYNC_JOB_NONE)
return 0;
virBufferAsprintf(&attrBuf, " type='%s' async='%s'",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(job),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.phase) {
virBufferAsprintf(&attrBuf, " phase='%s'",
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob,
priv->job.phase));
}
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE)
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE)
virBufferAsprintf(&attrBuf, " flags='0x%lx'",
priv->job.apiFlags);
if (priv->job.cb &&
@@ -1255,7 +1223,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
if ((tmp = virXPathString("string(@type)", ctxt))) {
int type;
- if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
+ if ((type = virDomainJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job type %s"), tmp);
return -1;
@@ -1267,7 +1235,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
if ((tmp = virXPathString("string(@async)", ctxt))) {
int async;
- if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
+ if ((async = virDomainAsyncJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown async job type %s"), tmp);
return -1;
@@ -1276,7 +1244,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
priv->job.asyncJob = async;
if ((tmp = virXPathString("string(@phase)", ctxt))) {
- priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
+ priv->job.phase = virDomainAsyncJobPhaseFromString(async, tmp);
if (priv->job.phase < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job phase %s"), tmp);
diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h
index bec6e3a61c..6520b42c80 100644
--- a/src/qemu/qemu_domainjob.h
+++ b/src/qemu/qemu_domainjob.h
@@ -24,61 +24,14 @@
#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
#define QEMU_JOB_DEFAULT_MASK \
- (JOB_MASK(QEMU_JOB_QUERY) | \
- JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ABORT))
+ (JOB_MASK(VIR_JOB_QUERY) | \
+ JOB_MASK(VIR_JOB_DESTROY) | \
+ JOB_MASK(VIR_JOB_ABORT))
/* Jobs which have to be tracked in domain state XML. */
#define QEMU_DOMAIN_TRACK_JOBS \
- (JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ASYNC))
-
-/* Only 1 job is allowed at any time
- * A job includes *all* monitor commands, even those just querying
- * information, not merely actions */
-typedef enum {
- QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
- QEMU_JOB_QUERY, /* Doesn't change any state */
- QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
- QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
- QEMU_JOB_MODIFY, /* May change state */
- QEMU_JOB_ABORT, /* Abort current async job */
- QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
-
- /* The following two items must always be the last items before JOB_LAST */
- QEMU_JOB_ASYNC, /* Asynchronous job */
- QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
-
- QEMU_JOB_LAST
-} qemuDomainJob;
-VIR_ENUM_DECL(qemuDomainJob);
-
-typedef enum {
- QEMU_AGENT_JOB_NONE = 0, /* No agent job. */
- QEMU_AGENT_JOB_QUERY, /* Does not change state of domain */
- QEMU_AGENT_JOB_MODIFY, /* May change state of domain */
-
- QEMU_AGENT_JOB_LAST
-} qemuDomainAgentJob;
-VIR_ENUM_DECL(qemuDomainAgentJob);
-
-/* Async job consists of a series of jobs that may change state. Independent
- * jobs that do not change state (and possibly others if explicitly allowed by
- * current async job) are allowed to be run even if async job is active.
- */
-typedef enum {
- QEMU_ASYNC_JOB_NONE = 0,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- QEMU_ASYNC_JOB_SAVE,
- QEMU_ASYNC_JOB_DUMP,
- QEMU_ASYNC_JOB_SNAPSHOT,
- QEMU_ASYNC_JOB_START,
- QEMU_ASYNC_JOB_BACKUP,
-
- QEMU_ASYNC_JOB_LAST
-} qemuDomainAsyncJob;
-VIR_ENUM_DECL(qemuDomainAsyncJob);
+ (JOB_MASK(VIR_JOB_DESTROY) | \
+ JOB_MASK(VIR_JOB_ASYNC))
typedef enum {
@@ -144,21 +97,21 @@ struct _qemuDomainJobObj {
int jobsQueued;
- /* The following members are for QEMU_JOB_* */
- qemuDomainJob active; /* Currently running job */
+ /* The following members are for VIR_JOB_* */
+ virDomainJob active; /* Currently running job */
unsigned long long owner; /* Thread id which set current job */
char *ownerAPI; /* The API which owns the job */
unsigned long long started; /* When the current job started */
- /* The following members are for QEMU_AGENT_JOB_* */
- qemuDomainAgentJob agentActive; /* Currently running agent job */
+ /* The following members are for VIR_AGENT_JOB_* */
+ virDomainAgentJob agentActive; /* Currently running agent job */
unsigned long long agentOwner; /* Thread id which set current agent job */
char *agentOwnerAPI; /* The API which owns the agent job */
unsigned long long agentStarted; /* When the current agent job started */
- /* The following members are for QEMU_ASYNC_JOB_* */
+ /* The following members are for VIR_ASYNC_JOB_* */
virCond asyncCond; /* Use to coordinate with async jobs */
- qemuDomainAsyncJob asyncJob; /* Currently active async job */
+ virDomainAsyncJob asyncJob; /* Currently active async job */
unsigned long long asyncOwner; /* Thread which set current async job */
char *asyncOwnerAPI; /* The API which owns the async job */
unsigned long long asyncStarted; /* When the current async job started */
@@ -177,9 +130,9 @@ struct _qemuDomainJobObj {
void qemuDomainJobSetStatsType(virDomainJobData *jobData,
qemuDomainJobStatsType type);
-const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+const char *virDomainAsyncJobPhaseToString(virDomainAsyncJob job,
int phase);
-int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+int virDomainAsyncJobPhaseFromString(virDomainAsyncJob job,
const char *phase);
void qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
@@ -187,25 +140,25 @@ void qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
int qemuDomainObjBeginJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginAgentJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAgentJob agentJob)
+ virDomainAgentJob agentJob)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginAsyncJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobOperation operation,
unsigned long apiFlags)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginNestedJob(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
G_GNUC_WARN_UNUSED_RESULT;
int qemuDomainObjBeginJobNowait(virQEMUDriver *driver,
virDomainObj *obj,
- qemuDomainJob job)
+ virDomainJob job)
G_GNUC_WARN_UNUSED_RESULT;
void qemuDomainObjEndJob(virDomainObj *obj);
@@ -235,7 +188,7 @@ int qemuDomainJobDataToParams(virDomainJobData *jobData,
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);
-bool qemuDomainTrackJob(qemuDomainJob job);
+bool qemuDomainTrackJob(virDomainJob job);
void qemuDomainObjClearJob(qemuDomainJobObj *job);
G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(qemuDomainJobObj, qemuDomainObjClearJob);
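
[Note for reviewers: a sketch of how the retained JOB_MASK macro composes
with the moved enums; backupMask mirrors what qemuBackupBegin() sets in
this patch, and the variable names are illustrative:

    unsigned int defaultMask = JOB_MASK(VIR_JOB_QUERY) |
                               JOB_MASK(VIR_JOB_DESTROY) |
                               JOB_MASK(VIR_JOB_ABORT);  /* == QEMU_JOB_DEFAULT_MASK */

    unsigned int backupMask = defaultMask |
                              JOB_MASK(VIR_JOB_SUSPEND) |
                              JOB_MASK(VIR_JOB_MODIFY);

VIR_JOB_NONE maps to no bit (JOB_MASK(0) == 0), so "no job" is always
allowed, and qemuDomainObjSetAsyncJobMask() forces JOB_MASK(VIR_JOB_DESTROY)
into whatever mask is requested.]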
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b7e83c769a..77012eb527 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -157,7 +157,7 @@ static int qemuDomainObjStart(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
unsigned int flags,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
static int qemuDomainManagedSaveLoad(virDomainObj *vm,
void *opaque);
@@ -202,7 +202,7 @@ qemuAutostartDomain(virDomainObj *vm,
}
if (qemuDomainObjStart(NULL, driver, vm, flags,
- QEMU_ASYNC_JOB_START) < 0) {
+ VIR_ASYNC_JOB_START) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to autostart VM '%s': %s"),
vm->def->name, virGetLastErrorMessage());
@@ -1625,7 +1625,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
goto cleanup;
}
- if (qemuProcessStart(conn, driver, vm, NULL, QEMU_ASYNC_JOB_START,
+ if (qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START,
NULL, -1, NULL, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags) < 0) {
@@ -1679,15 +1679,15 @@ static int qemuDomainSuspend(virDomainPtr dom)
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_SUSPEND) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_SUSPEND) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
reason = VIR_DOMAIN_PAUSED_MIGRATION;
- else if (priv->job.asyncJob == QEMU_ASYNC_JOB_SNAPSHOT)
+ else if (priv->job.asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
reason = VIR_DOMAIN_PAUSED_SNAPSHOT;
else
reason = VIR_DOMAIN_PAUSED_USER;
@@ -1699,7 +1699,7 @@ static int qemuDomainSuspend(virDomainPtr dom)
goto endjob;
}
if (state != VIR_DOMAIN_PAUSED) {
- if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessStopCPUs(driver, vm, reason, VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
}
qemuDomainSaveStatus(vm);
@@ -1729,7 +1729,7 @@ static int qemuDomainResume(virDomainPtr dom)
if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -1751,7 +1751,7 @@ static int qemuDomainResume(virDomainPtr dom)
state == VIR_DOMAIN_PAUSED) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("resume operation failed"));
@@ -1782,7 +1782,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriver *driver,
QEMU_AGENT_SHUTDOWN_POWERDOWN;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
@@ -1815,7 +1815,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriver *driver,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
@@ -1914,7 +1914,7 @@ qemuDomainRebootAgent(virQEMUDriver *driver,
agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_MODIFY) < 0)
+ VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (!qemuDomainAgentAvailable(vm, agentForced))
@@ -1943,7 +1943,7 @@ qemuDomainRebootMonitor(virQEMUDriver *driver,
int ret = -1;
if (qemuDomainObjBeginJob(driver, vm,
- QEMU_JOB_MODIFY) < 0)
+ VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -2032,7 +2032,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags)
if (virDomainResetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -2090,7 +2090,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
reason == VIR_DOMAIN_PAUSED_STARTING_UP &&
!priv->beingDestroyed);
- if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY,
+ if (qemuProcessBeginStopJob(driver, vm, VIR_JOB_DESTROY,
!(flags & VIR_DOMAIN_DESTROY_GRACEFUL)) < 0)
goto cleanup;
@@ -2107,11 +2107,11 @@ qemuDomainDestroyFlags(virDomainPtr dom,
qemuDomainSetFakeReboot(vm, false);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, stopFlags);
+ VIR_ASYNC_JOB_NONE, stopFlags);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
@@ -2195,7 +2195,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -2338,7 +2338,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period,
if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -2406,7 +2406,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -2465,7 +2465,7 @@ static int qemuDomainSendKey(virDomainPtr domain,
if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -2644,7 +2644,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_SAVE,
VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
goto cleanup;
@@ -2661,7 +2661,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
was_running = true;
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_SAVE) < 0)
+ VIR_ASYNC_JOB_SAVE) < 0)
goto endjob;
if (!virDomainObjIsActive(vm)) {
@@ -2712,13 +2712,13 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
xml = NULL;
ret = qemuSaveImageCreate(driver, vm, path, data, compressor,
- flags, QEMU_ASYNC_JOB_SAVE);
+ flags, VIR_ASYNC_JOB_SAVE);
if (ret < 0)
goto endjob;
/* Shut it down */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_SAVED,
- QEMU_ASYNC_JOB_SAVE, 0);
+ VIR_ASYNC_JOB_SAVE, 0);
virDomainAuditStop(vm, "saved");
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SAVED);
@@ -2729,7 +2729,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
virErrorPreserveLast(&save_err);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
- QEMU_ASYNC_JOB_SAVE) < 0) {
+ VIR_ASYNC_JOB_SAVE) < 0) {
VIR_WARN("Unable to resume guest CPUs after save failure");
virObjectEventStateQueue(driver->domainEventState,
virDomainEventLifecycleNewFromObj(vm,
@@ -2977,7 +2977,7 @@ static int
qemuDumpToFd(virQEMUDriver *driver,
virDomainObj *vm,
int fd,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *dumpformat)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -3085,7 +3085,7 @@ doCoreDump(virQEMUDriver *driver,
if (STREQ(memory_dump_format, "elf"))
memory_dump_format = NULL;
- if (qemuDumpToFd(driver, vm, fd, QEMU_ASYNC_JOB_DUMP,
+ if (qemuDumpToFd(driver, vm, fd, VIR_ASYNC_JOB_DUMP,
memory_dump_format) < 0)
goto cleanup;
} else {
@@ -3100,7 +3100,7 @@ doCoreDump(virQEMUDriver *driver,
goto cleanup;
if (qemuMigrationSrcToFile(driver, vm, fd, compressor,
- QEMU_ASYNC_JOB_DUMP) < 0)
+ VIR_ASYNC_JOB_DUMP) < 0)
goto cleanup;
}
@@ -3150,7 +3150,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
goto cleanup;
if (qemuDomainObjBeginAsyncJob(driver, vm,
- QEMU_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP,
flags) < 0)
goto cleanup;
@@ -3170,7 +3170,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
if (!(flags & VIR_DUMP_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP,
- QEMU_ASYNC_JOB_DUMP) < 0)
+ VIR_ASYNC_JOB_DUMP) < 0)
goto endjob;
paused = true;
@@ -3189,7 +3189,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
endjob:
if ((ret == 0) && (flags & VIR_DUMP_CRASH)) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED,
- QEMU_ASYNC_JOB_DUMP, 0);
+ VIR_ASYNC_JOB_DUMP, 0);
virDomainAuditStop(vm, "crashed");
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
@@ -3205,7 +3205,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
if (resume && virDomainObjIsActive(vm)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_DUMP) < 0) {
+ VIR_ASYNC_JOB_DUMP) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -3264,7 +3264,7 @@ qemuDomainScreenshot(virDomainPtr dom,
if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -3384,7 +3384,7 @@ processWatchdogEvent(virQEMUDriver *driver,
switch (action) {
case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
if (qemuDomainObjBeginAsyncJob(driver, vm,
- QEMU_ASYNC_JOB_DUMP,
+ VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP,
flags) < 0) {
return;
@@ -3401,7 +3401,7 @@ processWatchdogEvent(virQEMUDriver *driver,
ret = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_DUMP);
+ VIR_ASYNC_JOB_DUMP);
if (ret < 0)
virReportError(VIR_ERR_OPERATION_FAILED,
@@ -3460,7 +3460,7 @@ processGuestPanicEvent(virQEMUDriver *driver,
bool removeInactive = false;
unsigned long flags = VIR_DUMP_MEMORY_ONLY;
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_DUMP,
VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0)
return;
@@ -3495,7 +3495,7 @@ processGuestPanicEvent(virQEMUDriver *driver,
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED,
- QEMU_ASYNC_JOB_DUMP, 0);
+ VIR_ASYNC_JOB_DUMP, 0);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_CRASHED);
@@ -3540,7 +3540,7 @@ processDeviceDeletedEvent(virQEMUDriver *driver,
VIR_DEBUG("Removing device %s from domain %p %s",
devAlias, vm, vm->def->name);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -3777,7 +3777,7 @@ processNicRxFilterChangedEvent(virQEMUDriver *driver,
"from domain %p %s",
devAlias, vm, vm->def->name);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -3903,7 +3903,7 @@ processSerialChangedEvent(virQEMUDriver *driver,
memset(&dev, 0, sizeof(dev));
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -3955,7 +3955,7 @@ processBlockJobEvent(virQEMUDriver *driver,
virDomainDiskDef *disk;
g_autoptr(qemuBlockJobData) job = NULL;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -3977,7 +3977,7 @@ processBlockJobEvent(virQEMUDriver *driver,
job->newstate = status;
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
endjob:
qemuDomainObjEndJob(vm);
@@ -3989,7 +3989,7 @@ processJobStatusChangeEvent(virQEMUDriver *driver,
virDomainObj *vm,
qemuBlockJobData *job)
{
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -3997,7 +3997,7 @@ processJobStatusChangeEvent(virQEMUDriver *driver,
goto endjob;
}
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
endjob:
qemuDomainObjEndJob(vm);
@@ -4015,7 +4015,7 @@ processMonitorEOFEvent(virQEMUDriver *driver,
unsigned int stopFlags = 0;
virObjectEvent *event = NULL;
- if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, true) < 0)
+ if (qemuProcessBeginStopJob(driver, vm, VIR_JOB_DESTROY, true) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -4032,7 +4032,7 @@ processMonitorEOFEvent(virQEMUDriver *driver,
auditReason = "failed";
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuMigrationDstErrorSave(driver, vm->def->name,
qemuMonitorLastError(priv->mon));
@@ -4040,7 +4040,7 @@ processMonitorEOFEvent(virQEMUDriver *driver,
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
eventReason);
- qemuProcessStop(driver, vm, stopReason, QEMU_ASYNC_JOB_NONE, stopFlags);
+ qemuProcessStop(driver, vm, stopReason, VIR_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(vm, auditReason);
virObjectEventStateQueue(driver->domainEventState, event);
@@ -4131,7 +4131,7 @@ processMemoryDeviceSizeChange(virQEMUDriver *driver,
virObjectEvent *event = NULL;
unsigned long long balloon;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return;
if (!virDomainObjIsActive(vm)) {
@@ -4347,10 +4347,10 @@ qemuDomainSetVcpusFlags(virDomainPtr dom,
if (useAgent) {
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
} else {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
}
@@ -4487,7 +4487,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -4613,7 +4613,7 @@ qemuDomainPinEmulator(virDomainPtr dom,
if (virDomainPinEmulatorEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -4789,7 +4789,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
goto cleanup;
if (flags & VIR_DOMAIN_VCPU_GUEST) {
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -4874,7 +4874,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriver *driver,
size_t i;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -5003,7 +5003,7 @@ qemuDomainPinIOThread(virDomainPtr dom,
if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -5402,7 +5402,7 @@ qemuDomainChgIOThread(virQEMUDriver *driver,
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -5830,7 +5830,7 @@ qemuDomainRestoreFlags(virConnectPtr conn,
goto cleanup;
ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path,
- false, reset_nvram, QEMU_ASYNC_JOB_START);
+ false, reset_nvram, VIR_ASYNC_JOB_START);
qemuProcessEndJob(vm);
@@ -6039,7 +6039,7 @@ qemuDomainObjRestore(virConnectPtr conn,
bool start_paused,
bool bypass_cache,
bool reset_nvram,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virDomainDef) def = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
@@ -6301,7 +6301,7 @@ qemuDomainObjStart(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
g_autofree char *managed_save = NULL;
@@ -6413,7 +6413,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
}
if (qemuDomainObjStart(dom->conn, driver, vm, flags,
- QEMU_ASYNC_JOB_START) < 0)
+ VIR_ASYNC_JOB_START) < 0)
goto endjob;
dom->id = vm->def->id;
@@ -6550,7 +6550,7 @@ qemuDomainUndefineFlags(virDomainPtr dom,
if (virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!vm->persistent) {
@@ -6824,7 +6824,7 @@ qemuDomainAttachDeviceLive(virDomainObj *vm,
}
if (ret == 0)
- ret = qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE);
+ ret = qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE);
return ret;
}
@@ -7811,7 +7811,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom,
if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -7866,7 +7866,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom,
if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -7994,7 +7994,7 @@ qemuDomainDetachDeviceLiveAndConfig(virQEMUDriver *driver,
if ((rc = qemuDomainDetachDeviceLive(vm, dev, driver, false)) < 0)
goto cleanup;
- if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
qemuDomainSaveStatus(vm);
@@ -8067,7 +8067,7 @@ qemuDomainDetachDeviceAliasLiveAndConfig(virQEMUDriver *driver,
if ((rc = qemuDomainDetachDeviceLive(vm, &dev, driver, true)) < 0)
return -1;
- if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
}
@@ -8096,7 +8096,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom,
if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -8131,7 +8131,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom,
if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjUpdateModificationImpact(vm, &flags) < 0)
@@ -8204,7 +8204,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom,
autostart = (autostart != 0);
if (vm->autostart != autostart) {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name)))
@@ -8350,7 +8350,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -8524,7 +8524,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
/* QEMU and LXC implementation are identical */
@@ -8767,7 +8767,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom,
}
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -8987,7 +8987,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom,
if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -9062,7 +9062,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom,
if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!(def = virDomainObjGetOneDefState(vm, flags, &live)))
@@ -9248,7 +9248,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -9751,7 +9751,7 @@ qemuDomainBlockResize(virDomainPtr dom,
if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -9962,7 +9962,7 @@ qemuDomainBlockStats(virDomainPtr dom,
if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -10020,7 +10020,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom,
if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -10182,7 +10182,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom,
if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -10491,7 +10491,7 @@ qemuDomainGetInterfaceParameters(virDomainPtr dom,
return ret;
}
-/* This functions assumes that job QEMU_JOB_QUERY is started by a caller */
+/* This function assumes that job VIR_JOB_QUERY is started by a caller */
static int
qemuDomainMemoryStatsInternal(virQEMUDriver *driver,
virDomainObj *vm,
@@ -10547,7 +10547,7 @@ qemuDomainMemoryStats(virDomainPtr dom,
if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
ret = qemuDomainMemoryStatsInternal(driver, vm, stats, nr_stats);
@@ -10657,7 +10657,7 @@ qemuDomainMemoryPeek(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -10934,7 +10934,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom,
if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (!(disk = virDomainDiskByName(vm->def, path, false))) {
@@ -12428,13 +12428,13 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver,
jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events &&
jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
- qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationAnyFetchStats(driver, vm, VIR_ASYNC_JOB_NONE,
jobData, NULL) < 0)
return -1;
if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE &&
privStats->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
- qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationSrcFetchMirrorStats(driver, vm, VIR_ASYNC_JOB_NONE,
jobData) < 0)
return -1;
@@ -12456,7 +12456,7 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver,
qemuMonitorDumpStats stats = { 0 };
int rc;
- if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
rc = qemuMonitorQueryDump(priv->mon, &stats);
@@ -12518,14 +12518,14 @@ qemuDomainGetJobStatsInternal(virQEMUDriver *driver,
return 0;
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("migration statistics are available only on "
"the source host"));
return -1;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -12680,7 +12680,7 @@ static int qemuDomainAbortJob(virDomainPtr dom)
if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_ABORT) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -12689,24 +12689,24 @@ static int qemuDomainAbortJob(virDomainPtr dom)
priv = vm->privateData;
switch (priv->job.asyncJob) {
- case QEMU_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_NONE:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("no job is active on the domain"));
break;
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort incoming migration;"
" use virDomainDestroy instead"));
break;
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort VM start;"
" use virDomainDestroy instead"));
break;
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
if ((priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY ||
(virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY))) {
@@ -12718,11 +12718,11 @@ static int qemuDomainAbortJob(virDomainPtr dom)
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_SAVE:
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_DUMP:
if (priv->job.apiFlags & VIR_DUMP_MEMORY_ONLY) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort memory-only dump"));
@@ -12732,18 +12732,18 @@ static int qemuDomainAbortJob(virDomainPtr dom)
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SNAPSHOT:
ret = qemuDomainAbortJobMigration(vm);
break;
- case QEMU_ASYNC_JOB_BACKUP:
- qemuBackupJobCancelBlockjobs(vm, priv->backup, true, QEMU_ASYNC_JOB_NONE);
+ case VIR_ASYNC_JOB_BACKUP:
+ qemuBackupJobCancelBlockjobs(vm, priv->backup, true, VIR_ASYNC_JOB_NONE);
ret = 0;
break;
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
default:
- virReportEnumRangeError(qemuDomainAsyncJob, priv->job.asyncJob);
+ virReportEnumRangeError(virDomainAsyncJob, priv->job.asyncJob);
break;
}
@@ -12776,7 +12776,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -12795,7 +12795,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
downtime) < 0)
goto endjob;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
@@ -12836,13 +12836,13 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
goto endjob;
- if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(driver, vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto endjob;
@@ -12890,7 +12890,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -12906,7 +12906,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
}
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_XBZRLE_CACHE_SIZE)) {
- if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(driver, vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto endjob;
@@ -12952,7 +12952,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom,
if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -12977,7 +12977,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom,
cacheSize) < 0)
goto endjob;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
@@ -13039,7 +13039,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -13064,7 +13064,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
bandwidth * 1024 * 1024) < 0)
goto endjob;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_NONE,
migParams) < 0)
goto endjob;
} else {
@@ -13101,13 +13101,13 @@ qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriver *driver,
int rc;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
goto cleanup;
- if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuMigrationParamsFetch(driver, vm, VIR_ASYNC_JOB_NONE,
&migParams) < 0)
goto cleanup;
@@ -13196,7 +13196,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom,
if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MIGRATION_OP) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -13204,7 +13204,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom,
priv = vm->privateData;
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("post-copy can only be started while "
"outgoing migration is in progress"));
@@ -13941,7 +13941,7 @@ qemuDomainQemuMonitorCommandWithFiles(virDomainPtr domain,
if (virDomainQemuMonitorCommandWithFilesEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14285,7 +14285,7 @@ qemuDomainBlockPullCommon(virDomainObj *vm,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14411,7 +14411,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14458,13 +14458,13 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
qemuDomainSaveStatus(vm);
if (!async) {
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
while (qemuBlockJobIsRunning(job)) {
if (virDomainObjWait(vm) < 0) {
ret = -1;
goto endjob;
}
- qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE);
}
if (pivot &&
@@ -14486,7 +14486,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom,
endjob:
if (job && !async)
- qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE);
+ qemuBlockJobSyncEnd(vm, job, VIR_ASYNC_JOB_NONE);
qemuDomainObjEndJob(vm);
cleanup:
@@ -14573,7 +14573,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom,
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14643,7 +14643,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom,
if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -14844,7 +14844,7 @@ qemuDomainBlockCopyCommon(virDomainObj *vm,
return -1;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -15030,7 +15030,7 @@ qemuDomainBlockCopyCommon(virDomainObj *vm,
goto endjob;
}
} else {
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE)))
goto endjob;
if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
@@ -15069,7 +15069,7 @@ qemuDomainBlockCopyCommon(virDomainObj *vm,
if (crdata &&
qemuBlockStorageSourceCreate(vm, mirror, mirrorBacking,
mirror->backingStore,
- crdata->srcdata[0], QEMU_ASYNC_JOB_NONE) < 0)
+ crdata->srcdata[0], VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
}
@@ -15346,7 +15346,7 @@ qemuDomainBlockCommit(virDomainPtr dom,
if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -15586,7 +15586,7 @@ qemuDomainOpenGraphics(virDomainPtr dom,
if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -15698,7 +15698,7 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom,
if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1],
"graphicsfd",
@@ -15953,7 +15953,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom,
cfg = virQEMUDriverGetConfig(driver);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
priv = vm->privateData;
@@ -16234,7 +16234,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom,
if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
/* the API check guarantees that only one of the definitions will be set */
@@ -16378,7 +16378,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom,
if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -16453,7 +16453,7 @@ qemuDomainSetMetadata(virDomainPtr dom,
if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
ret = virDomainObjSetMetadata(vm, type, metadata, key, uri,
@@ -16576,7 +16576,7 @@ qemuDomainQueryWakeupSuspendSupport(virQEMUDriver *driver,
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE))
return -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if ((ret = virDomainObjCheckActive(vm)) < 0)
@@ -16598,7 +16598,7 @@ qemuDomainPMSuspendAgent(virQEMUDriver *driver,
qemuAgent *agent;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -16710,7 +16710,7 @@ qemuDomainPMWakeup(virDomainPtr dom,
if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -16766,7 +16766,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain,
if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -16861,7 +16861,7 @@ qemuDomainFSTrim(virDomainPtr dom,
if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -17032,7 +17032,7 @@ qemuDomainGetHostnameAgent(virQEMUDriver *driver,
qemuAgent *agent;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -17064,7 +17064,7 @@ qemuDomainGetHostnameLease(virQEMUDriver *driver,
size_t i, j;
int ret = -1;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -17176,7 +17176,7 @@ qemuDomainGetTime(virDomainPtr dom,
if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17213,7 +17213,7 @@ qemuDomainSetTimeAgent(virQEMUDriver *driver,
qemuAgent *agent;
int ret = -1;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0)
@@ -17270,7 +17270,7 @@ qemuDomainSetTime(virDomainPtr dom,
if (qemuDomainSetTimeAgent(driver, vm, seconds, nseconds, rtcSync) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17315,7 +17315,7 @@ qemuDomainFSFreeze(virDomainPtr dom,
if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17356,7 +17356,7 @@ qemuDomainFSThaw(virDomainPtr dom,
if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -17896,7 +17896,7 @@ qemuDomainGetStatsVcpu(virQEMUDriver *driver,
cpudelay = g_new0(unsigned long long, virDomainDefGetVcpus(dom->def));
if (HAVE_JOB(privflags) && virDomainObjIsActive(dom) &&
- qemuDomainRefreshVcpuHalted(driver, dom, QEMU_ASYNC_JOB_NONE) < 0) {
+ qemuDomainRefreshVcpuHalted(driver, dom, VIR_ASYNC_JOB_NONE) < 0) {
/* it's ok to be silent and go ahead, because halted vcpu info
* wasn't here from the beginning */
virResetLastError();
@@ -18802,9 +18802,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
int rv;
if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT)
- rv = qemuDomainObjBeginJobNowait(driver, vm, QEMU_JOB_QUERY);
+ rv = qemuDomainObjBeginJobNowait(driver, vm, VIR_JOB_QUERY);
else
- rv = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY);
+ rv = qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY);
if (rv == 0)
domflags |= QEMU_DOMAIN_STATS_HAVE_JOB;
@@ -18876,7 +18876,7 @@ qemuDomainGetFSInfoAgent(virQEMUDriver *driver,
qemuAgent *agent;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_QUERY) < 0)
+ VIR_AGENT_JOB_QUERY) < 0)
return ret;
if (virDomainObjCheckActive(vm) < 0)
@@ -18986,7 +18986,7 @@ qemuDomainGetFSInfo(virDomainPtr dom,
if ((nfs = qemuDomainGetFSInfoAgent(driver, vm, &agentinfo)) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -19037,7 +19037,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom,
break;
case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT:
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -19089,7 +19089,7 @@ qemuDomainSetUserPassword(virDomainPtr dom,
if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -19276,7 +19276,7 @@ static int qemuDomainRename(virDomainPtr dom,
if (virDomainRenameEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
@@ -19393,7 +19393,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom,
if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -19452,7 +19452,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom,
if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -19544,7 +19544,7 @@ qemuDomainSetVcpu(virDomainPtr dom,
if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -19603,7 +19603,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom,
if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -19626,7 +19626,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom,
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
!src->nodestorage &&
- qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockNodeNamesDetect(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
goto endjob;
if (!src->nodestorage) {
@@ -19794,7 +19794,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom,
if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
@@ -19942,7 +19942,7 @@ qemuDomainGetSEVInfo(virQEMUDriver *driver,
virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
return -1;
if (virDomainObjCheckActive(vm) < 0) {
@@ -20087,7 +20087,7 @@ qemuDomainSetLaunchSecurityState(virDomainPtr domain,
else if (rc == 1)
hasSetaddr = true;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -20433,7 +20433,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom,
goto cleanup;
if (qemuDomainObjBeginAgentJob(driver, vm,
- QEMU_AGENT_JOB_QUERY) < 0)
+ VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -20494,7 +20494,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom,
qemuDomainObjEndAgentJob(vm);
if (nfs > 0 || ndisks > 0) {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0)
@@ -20610,7 +20610,7 @@ qemuDomainAuthorizedSSHKeysGet(virDomainPtr dom,
if (virDomainAuthorizedSshKeysGetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -20651,7 +20651,7 @@ qemuDomainAuthorizedSSHKeysSet(virDomainPtr dom,
if (virDomainAuthorizedSshKeysSetEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_QUERY) < 0)
goto cleanup;
if (!qemuDomainAgentAvailable(vm, true))
@@ -20760,7 +20760,7 @@ qemuDomainStartDirtyRateCalc(virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0) {
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 8ea95406c7..3d1bb1be2a 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -369,7 +369,7 @@ qemuDomainChangeMediaLegacy(virQEMUDriver *driver,
int
qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virJSONValue) props = NULL;
@@ -414,7 +414,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
int
qemuHotplugRemoveDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret;
@@ -452,7 +452,7 @@ static int
qemuHotplugAttachManagedPR(virQEMUDriver *driver,
virDomainObj *vm,
virStorageSource *src,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virJSONValue) props = NULL;
@@ -502,7 +502,7 @@ qemuHotplugAttachManagedPR(virQEMUDriver *driver,
static int
qemuHotplugRemoveManagedPR(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virErrorPtr orig_err;
@@ -672,7 +672,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriver *driver,
if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0)
goto cleanup;
- if (qemuHotplugAttachManagedPR(driver, vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachManagedPR(driver, vm, newsrc, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
@@ -700,7 +700,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriver *driver,
/* remove PR manager object if unneeded */
if (managedpr)
- ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE));
/* revert old image do the disk definition */
if (oldsrc)
@@ -714,7 +714,7 @@ static qemuSnapshotDiskContext *
qemuDomainAttachDiskGenericTransient(virDomainObj *vm,
virDomainDiskDef *disk,
GHashTable *blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
g_autoptr(virDomainSnapshotDiskDef) snapdiskdef = NULL;
@@ -741,7 +741,7 @@ int
qemuDomainAttachDiskGeneric(virQEMUDriver *driver,
virDomainObj *vm,
virDomainDiskDef *disk,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuBlockStorageSourceChainData) data = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
@@ -1089,10 +1089,10 @@ qemuDomainAttachDeviceDiskLiveInternal(virQEMUDriver *driver,
if (qemuDomainPrepareDiskSource(disk, priv, cfg) < 0)
goto cleanup;
- if (qemuHotplugAttachManagedPR(driver, vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachManagedPR(driver, vm, disk->src, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
- ret = qemuDomainAttachDiskGeneric(driver, vm, disk, QEMU_ASYNC_JOB_NONE);
+ ret = qemuDomainAttachDiskGeneric(driver, vm, disk, VIR_ASYNC_JOB_NONE);
virDomainAuditDisk(vm, NULL, disk->src, "attach", ret == 0);
@@ -1113,7 +1113,7 @@ qemuDomainAttachDeviceDiskLiveInternal(virQEMUDriver *driver,
ignore_value(qemuDomainStorageSourceChainAccessRevoke(driver, vm,
disk->src));
if (virStorageSourceChainHasManagedPR(disk->src))
- ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE));
}
qemuDomainSecretDiskDestroy(disk);
@@ -1774,7 +1774,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriver *driver,
void
qemuDomainDelTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *secAlias,
const char *tlsAlias)
{
@@ -1805,7 +1805,7 @@ qemuDomainDelTLSObjects(virQEMUDriver *driver,
int
qemuDomainAddTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValue **secProps,
virJSONValue **tlsProps)
{
@@ -1907,7 +1907,7 @@ qemuDomainAddChardevTLSObjects(virQEMUDriver *driver,
dev->data.tcp.tlscreds = true;
- if (qemuDomainAddTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ if (qemuDomainAddTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
&secProps, &tlsProps) < 0)
return -1;
@@ -2013,7 +2013,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriver *driver,
ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias));
qemuDomainObjExitMonitor(vm);
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
@@ -2308,7 +2308,7 @@ qemuDomainAttachChrDevice(virQEMUDriver *driver,
qemuDomainObjExitMonitor(vm);
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
@@ -2414,7 +2414,7 @@ qemuDomainAttachRNGDevice(virQEMUDriver *driver,
qemuDomainObjExitMonitor(vm);
virErrorRestore(&orig_err);
- qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuDomainDelTLSObjects(driver, vm, VIR_ASYNC_JOB_NONE,
secAlias, tlsAlias);
goto audit;
}
@@ -2510,14 +2510,14 @@ qemuDomainAttachMemory(virQEMUDriver *driver,
virObjectEventStateQueue(driver->domainEventState, event);
/* fix the balloon size */
- ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuProcessRefreshBalloonState(driver, vm, VIR_ASYNC_JOB_NONE));
/* mem is consumed by vm->def */
mem = NULL;
/* this step is best effort, removing the device would be so much trouble */
ignore_value(qemuDomainUpdateMemoryDeviceInfo(driver, vm,
- QEMU_ASYNC_JOB_NONE));
+ VIR_ASYNC_JOB_NONE));
ret = 0;
@@ -4353,7 +4353,7 @@ qemuDomainChangeGraphics(virQEMUDriver *driver,
VIR_DOMAIN_GRAPHICS_TYPE_VNC,
&dev->data.vnc.auth,
cfg->vncPassword,
- QEMU_ASYNC_JOB_NONE) < 0)
+ VIR_ASYNC_JOB_NONE) < 0)
return -1;
/* Steal the new dev's char * reference */
@@ -4400,7 +4400,7 @@ qemuDomainChangeGraphics(virQEMUDriver *driver,
VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
&dev->data.spice.auth,
cfg->spicePassword,
- QEMU_ASYNC_JOB_NONE) < 0)
+ VIR_ASYNC_JOB_NONE) < 0)
return -1;
/* Steal the new dev's char * reference */
@@ -4532,7 +4532,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriver *driver,
qemuDomainStorageSourceChainAccessRevoke(driver, vm, disk->src);
if (virStorageSourceChainHasManagedPR(disk->src) &&
- qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuHotplugRemoveManagedPR(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
goto cleanup;
if (disk->transient) {
@@ -4619,7 +4619,7 @@ qemuDomainRemoveMemoryDevice(virQEMUDriver *driver,
virDomainMemoryDefFree(mem);
/* fix the balloon size */
- ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE));
+ ignore_value(qemuProcessRefreshBalloonState(driver, vm, VIR_ASYNC_JOB_NONE));
/* decrease the mlock limit after memory unplug if necessary */
ignore_value(qemuDomainAdjustMaxMemLock(vm, false));
@@ -6296,7 +6296,7 @@ qemuDomainRemoveVcpu(virQEMUDriver *driver,
virErrorPtr save_error = NULL;
size_t i;
- if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (qemuDomainRefreshVcpuInfo(driver, vm, VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
/* validation requires us to set the expected state prior to calling it */
@@ -6441,7 +6441,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriver *driver,
/* start outputting of the new XML element to allow keeping unpluggability */
vm->def->individualvcpus = true;
- if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
+ if (qemuDomainRefreshVcpuInfo(driver, vm, VIR_ASYNC_JOB_NONE, false) < 0)
return -1;
/* validation requires us to set the expected state prior to calling it */
diff --git a/src/qemu/qemu_hotplug.h b/src/qemu/qemu_hotplug.h
index 19c07497b5..a0a9ae47e2 100644
--- a/src/qemu/qemu_hotplug.h
+++ b/src/qemu/qemu_hotplug.h
@@ -33,13 +33,13 @@ int qemuDomainChangeEjectableMedia(virQEMUDriver *driver,
void qemuDomainDelTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *secAlias,
const char *tlsAlias);
int qemuDomainAddTLSObjects(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virJSONValue **secProps,
virJSONValue **tlsProps);
@@ -61,7 +61,7 @@ int qemuDomainAttachDeviceDiskLive(virQEMUDriver *driver,
int qemuDomainAttachDiskGeneric(virQEMUDriver *driver,
virDomainObj *vm,
virDomainDiskDef *disk,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuDomainAttachNetDevice(virQEMUDriver *driver,
virDomainObj *vm,
@@ -164,11 +164,11 @@ unsigned long long qemuDomainGetUnplugTimeout(virDomainObj *vm)
G_GNUC_NO_INLINE
int qemuHotplugAttachDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuHotplugRemoveDBusVMState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuDomainChangeMemoryRequestedSize(virQEMUDriver *driver,
virDomainObj *vm,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index f109598fb4..43ffe2357a 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -84,7 +84,7 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase,
static int
qemuMigrationJobStart(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob job,
+ virDomainAsyncJob job,
unsigned long apiFlags)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
@@ -104,7 +104,7 @@ qemuMigrationJobContinue(virDomainObj *obj)
static bool
qemuMigrationJobIsActive(virDomainObj *vm,
- qemuDomainAsyncJob job)
+ virDomainAsyncJob job)
ATTRIBUTE_NONNULL(1);
static void
@@ -149,7 +149,7 @@ qemuMigrationSrcRestoreDomainState(virQEMUDriver *driver, virDomainObj *vm)
/* we got here through some sort of failure; start the domain again */
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0) {
/* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something
* to the logs and hope for the best */
@@ -501,7 +501,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriver *driver,
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto cleanup;
if (!server_started) {
@@ -542,7 +542,7 @@ qemuMigrationDstStopNBDServer(virQEMUDriver *driver,
return 0;
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
return -1;
if (qemuMonitorNBDServerStop(priv->mon) < 0)
@@ -583,7 +583,7 @@ qemuMigrationNBDReportMirrorError(qemuBlockJobData *job,
*/
static int
qemuMigrationSrcNBDStorageCopyReady(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
size_t i;
size_t notReady = 0;
@@ -638,7 +638,7 @@ qemuMigrationSrcNBDStorageCopyReady(virDomainObj *vm,
*/
static int
qemuMigrationSrcNBDCopyCancelled(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool abortMigration)
{
size_t i;
@@ -722,7 +722,7 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriver *driver,
virDomainDiskDef *disk,
qemuBlockJobData *job,
bool abortMigration,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int rv;
@@ -772,7 +772,7 @@ static int
qemuMigrationSrcNBDCopyCancel(virQEMUDriver *driver,
virDomainObj *vm,
bool abortMigration,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn)
{
virErrorPtr err = NULL;
@@ -855,7 +855,7 @@ qemuMigrationSrcNBDCopyCancel(virQEMUDriver *driver,
static int
qemuMigrationSrcCancelRemoveTempBitmaps(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
@@ -952,7 +952,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriver *driver,
return -1;
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);
@@ -1001,7 +1001,7 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriver *driver,
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
@@ -1199,14 +1199,14 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
}
}
- while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
+ while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) != 1) {
if (rv < 0)
return -1;
if (priv->job.abortJob) {
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
return -1;
}
@@ -1221,7 +1221,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
return -1;
}
- qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationSrcFetchMirrorStats(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
priv->job.current);
return 0;
}
@@ -1599,7 +1599,7 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
if (state == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm,
VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
VIR_WARN("Unable to pause guest CPUs for %s",
vm->def->name);
} else {
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
@@ -1673,7 +1673,7 @@ qemuMigrationUpdateJobType(virDomainJobData *jobData)
int
qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData,
char **error)
{
@@ -1703,23 +1703,23 @@ qemuMigrationJobName(virDomainObj *vm)
qemuDomainObjPrivate *priv = vm->privateData;
switch (priv->job.asyncJob) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
return _("migration out job");
- case QEMU_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_SAVE:
return _("domain save job");
- case QEMU_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_DUMP:
return _("domain core dump job");
- case QEMU_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_NONE:
return _("undefined");
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
return _("migration in job");
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SNAPSHOT:
return _("snapshot job");
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
return _("start job");
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_BACKUP:
return _("backup job");
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_LAST:
default:
return _("job");
}
@@ -1729,7 +1729,7 @@ qemuMigrationJobName(virDomainObj *vm)
static int
qemuMigrationJobCheckStatus(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobData *jobData = priv->job.current;
@@ -1793,7 +1793,7 @@ enum qemuMigrationCompletedFlags {
static int
qemuMigrationAnyCompleted(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn,
unsigned int flags)
{
@@ -1884,7 +1884,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
static int
qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virConnectPtr dconn,
unsigned int flags)
{
@@ -1925,7 +1925,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
priv->job.completed = virDomainJobDataCopy(jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
- if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
+ if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT &&
jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
@@ -1936,7 +1936,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
static int
qemuMigrationDstWaitForCompletion(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool postcopy)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -2046,7 +2046,7 @@ qemuMigrationSrcGraphicsRelocate(virQEMUDriver *driver,
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
@@ -2139,7 +2139,7 @@ int
qemuMigrationDstRun(virQEMUDriver *driver,
virDomainObj *vm,
const char *uri,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int rv;
@@ -2160,7 +2160,7 @@ qemuMigrationDstRun(virQEMUDriver *driver,
if (rv < 0)
return -1;
- if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
/* qemuMigrationDstWaitForCompletion is called from the Finish phase */
return 0;
}
@@ -2189,11 +2189,11 @@ qemuMigrationSrcCleanup(virDomainObj *vm,
VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
vm->def->name, conn,
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobPhaseToString(priv->job.asyncJob,
priv->job.phase));
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
return;
VIR_DEBUG("The connection which started outgoing migration of domain %s"
@@ -2210,7 +2210,7 @@ qemuMigrationSrcCleanup(virDomainObj *vm,
VIR_WARN("Migration of domain %s finished but we don't know if
the"
" domain was successfully started on destination or not",
vm->def->name);
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
/* clear the job and let higher levels decide what to do */
qemuMigrationJobFinish(vm);
@@ -2344,11 +2344,11 @@ qemuMigrationSrcBeginPhase(virQEMUDriver *driver,
cookieout, cookieoutlen, nmigrate_disks,
migrate_disks, flags);
- /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
+ /* Only set the phase if we are inside VIR_ASYNC_JOB_MIGRATION_OUT.
* Otherwise we will start the async job later in the perform phase losing
* change protection.
*/
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3);
if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
@@ -2505,7 +2505,7 @@ qemuMigrationSrcBegin(virConnectPtr conn,
virQEMUDriver *driver = conn->privateData;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
char *xml = NULL;
- qemuDomainAsyncJob asyncJob;
+ virDomainAsyncJob asyncJob;
if (cfg->migrateTLSForce &&
!(flags & VIR_MIGRATE_TUNNELLED) &&
@@ -2516,14 +2516,14 @@ qemuMigrationSrcBegin(virConnectPtr conn,
}
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
goto cleanup;
- asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
+ asyncJob = VIR_ASYNC_JOB_MIGRATION_OUT;
} else {
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
- asyncJob = QEMU_ASYNC_JOB_NONE;
+ asyncJob = VIR_ASYNC_JOB_NONE;
}
qemuMigrationSrcStoreDomainState(vm);
@@ -2583,13 +2583,13 @@ qemuMigrationDstPrepareCleanup(virQEMUDriver *driver,
VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
driver,
vm->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ virDomainJobTypeToString(priv->job.active),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob));
virPortAllocatorRelease(priv->migrationPort);
priv->migrationPort = 0;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN))
return;
qemuDomainObjDiscardAsyncJob(vm);
}
@@ -2694,7 +2694,7 @@ qemuMigrationDstPrepareAnyBlockDirtyBitmaps(virDomainObj *vm,
if (qemuMigrationCookieBlockDirtyBitmapsMatchDisks(vm->def,
mig->blockDirtyBitmaps) < 0)
return -1;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_MIGRATION_IN)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_MIGRATION_IN)))
return -1;
for (nextdisk = mig->blockDirtyBitmaps; nextdisk; nextdisk = nextdisk->next) {
@@ -2925,7 +2925,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
!!(flags & VIR_MIGRATE_NON_SHARED_INC)) <
0)
goto cleanup;
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
flags) < 0)
goto cleanup;
qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PREPARE);
@@ -2942,7 +2942,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;
- if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessInit(driver, vm, mig->cpu, VIR_ASYNC_JOB_MIGRATION_IN,
true, startFlags) < 0)
goto stopjob;
stopProcess = true;
@@ -2958,7 +2958,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
goto stopjob;
- rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ rv = qemuProcessLaunch(dconn, driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
incoming, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
startFlags);
@@ -2987,7 +2987,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
if (qemuMigrationDstPrepareAnyBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
goto stopjob;
- if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
migParams, mig->caps->automatic) < 0)
goto stopjob;
@@ -2995,7 +2995,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
* set the migration TLS parameters */
if (flags & VIR_MIGRATE_TLS) {
if (qemuMigrationParamsEnableTLS(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
&tlsAlias, NULL,
migParams) < 0)
goto stopjob;
@@ -3004,7 +3004,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
goto stopjob;
}
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
migParams) < 0)
goto stopjob;
@@ -3042,10 +3042,10 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
if (incoming->deferredURI &&
qemuMigrationDstRun(driver, vm, incoming->deferredURI,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
goto stopjob;
@@ -3110,7 +3110,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
return ret;
stopjob:
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
if (stopProcess) {
@@ -3119,7 +3119,7 @@ qemuMigrationDstPrepareAny(virQEMUDriver *driver,
stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
virDomainAuditStart(vm, "migrated", false);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
+ VIR_ASYNC_JOB_MIGRATION_IN, stopFlags);
}
qemuMigrationJobFinish(vm);
@@ -3425,7 +3425,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
*/
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
- qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationAnyFetchStats(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobData, NULL) < 0)
VIR_WARN("Could not refresh migration statistics");
@@ -3448,7 +3448,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
qemuMigrationSrcWaitForSpice(vm);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "migrated");
@@ -3465,7 +3465,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
/* cancel any outstanding NBD jobs */
qemuMigrationSrcNBDCopyCancel(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
+ VIR_ASYNC_JOB_MIGRATION_OUT, NULL);
virErrorRestore(&orig_err);
@@ -3475,7 +3475,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
else
qemuMigrationSrcRestoreDomainState(driver, vm);
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuDomainSaveStatus(vm);
@@ -3496,7 +3496,7 @@ qemuMigrationSrcConfirm(virQEMUDriver *driver,
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
int ret = -1;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
goto cleanup;
if (cancelled)
@@ -3816,7 +3816,7 @@ static int
qemuMigrationSrcContinue(virQEMUDriver *driver,
virDomainObj *vm,
qemuMonitorMigrationStatus status,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret;
@@ -3841,10 +3841,10 @@ qemuMigrationSetDBusVMState(virQEMUDriver *driver,
if (priv->dbusVMStateIds) {
int rv;
- if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugAttachDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
- if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
rv = qemuMonitorSetDBusVMStateIdList(priv->mon, priv->dbusVMStateIds);
@@ -3853,7 +3853,7 @@ qemuMigrationSetDBusVMState(virQEMUDriver *driver,
return rv;
} else {
- if (qemuHotplugRemoveDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuHotplugRemoveDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
}
@@ -3888,7 +3888,7 @@ qemuMigrationSrcRunPrepareBlockDirtyBitmapsMerge(virDomainObj *vm,
GSList *nextdisk;
int rc;
- if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)))
+ if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_MIGRATION_OUT)))
return -1;
for (nextdisk = mig->blockDirtyBitmaps; nextdisk; nextdisk = nextdisk->next) {
@@ -3944,7 +3944,7 @@ qemuMigrationSrcRunPrepareBlockDirtyBitmapsMerge(virDomainObj *vm,
}
}
- if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
return -1;
rc = qemuMonitorTransaction(priv->mon, &actions);
@@ -4107,7 +4107,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
qemuMigrationSrcRunPrepareBlockDirtyBitmaps(vm, mig, migParams, flags) < 0)
goto error;
- if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsCheck(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams, mig->caps->automatic) < 0)
goto error;
@@ -4121,7 +4121,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
hostname = spec->dest.host.name;
if (qemuMigrationParamsEnableTLS(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
&tlsAlias, hostname,
migParams) < 0)
goto error;
@@ -4135,7 +4135,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
priv->migMaxBandwidth * 1024 * 1024) < 0)
goto error;
- if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsApply(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
@@ -4188,12 +4188,12 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (!(flags & VIR_MIGRATE_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
if (priv->job.abortJob) {
@@ -4202,7 +4202,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
* priv->job.abortJob will not change */
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ virDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
goto exit_monitor;
}
@@ -4284,7 +4284,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;
rc = qemuMigrationSrcWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn, waitFlags);
if (rc == -2) {
goto error;
@@ -4307,7 +4307,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (mig->nbd &&
qemuMigrationSrcNBDCopyCancel(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn) < 0)
goto error;
@@ -4318,13 +4318,13 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(driver, vm,
QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
rc = qemuMigrationSrcWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn, waitFlags);
if (rc == -2) {
goto error;
@@ -4378,7 +4378,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (cancel &&
priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
+ VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitor(vm);
}
@@ -4386,10 +4386,10 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
/* cancel any outstanding NBD jobs */
if (mig && mig->nbd)
qemuMigrationSrcNBDCopyCancel(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
dconn);
- qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
@@ -5274,7 +5274,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
goto cleanup;
@@ -5314,7 +5314,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
*/
if (!v3proto) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
+ VIR_ASYNC_JOB_MIGRATION_OUT,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "migrated");
event = virDomainEventLifecycleNewFromObj(vm,
@@ -5330,7 +5330,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
* here
*/
if (!v3proto && ret < 0)
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationSrcRestoreDomainState(driver, vm);
@@ -5378,10 +5378,10 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
/* If we didn't start the job in the begin phase, start it now. */
if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
- if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationJobStart(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
flags) < 0)
return ret;
- } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
+ } else if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) {
return ret;
}
@@ -5407,7 +5407,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
endjob:
if (ret < 0) {
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationJobFinish(vm);
} else {
@@ -5637,7 +5637,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
port = priv->migrationPort;
priv->migrationPort = 0;
- if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
+ if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN)) {
qemuMigrationDstErrorReport(driver, vm->def->name);
goto cleanup;
}
@@ -5673,7 +5673,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
/* Check for a possible error on the monitor in case Finish was called
* earlier than monitor EOF handler got a chance to process the error
*/
- qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuDomainCheckMonitor(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN);
goto endjob;
}
@@ -5694,7 +5694,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
goto endjob;
if (qemuRefreshVirtioChannelState(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0)
goto endjob;
if (qemuConnectAgent(driver, vm) < 0)
@@ -5723,7 +5723,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
* before starting guest CPUs.
*/
if (qemuMigrationDstWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
!!(flags & VIR_MIGRATE_POSTCOPY)) < 0)
{
/* There's not much we can do for v2 protocol since the
* original domain on the source host is already gone.
@@ -5734,7 +5734,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
/* Now that the state data was transferred we can refresh the actual state
* of the devices */
- if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+ if (qemuProcessRefreshState(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
/* Similarly to the case above v2 protocol will not be able to recover
* from this. Let's ignore this and perhaps stuff will not break. */
if (v3proto)
@@ -5752,7 +5752,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
if (qemuProcessStartCPUs(driver, vm,
inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
: VIR_DOMAIN_RUNNING_MIGRATED,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
+ VIR_ASYNC_JOB_MIGRATION_IN) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
@@ -5791,7 +5791,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
if (inPostCopy) {
if (qemuMigrationDstWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
false) < 0) {
goto endjob;
}
@@ -5838,7 +5838,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
virDomainObjIsActive(vm)) {
if (doKill) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_MIGRATION_IN,
+ VIR_ASYNC_JOB_MIGRATION_IN,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "failed");
event = virDomainEventLifecycleNewFromObj(vm,
@@ -5871,7 +5871,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
}
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
qemuMigrationJobFinish(vm);
@@ -5901,7 +5901,7 @@ int
qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
int fd,
virCommand *compressor,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
@@ -6080,10 +6080,10 @@ qemuMigrationSrcCancel(virQEMUDriver *driver,
if (storage &&
qemuMigrationSrcNBDCopyCancel(driver, vm, true,
- QEMU_ASYNC_JOB_NONE, NULL) < 0)
+ VIR_ASYNC_JOB_NONE, NULL) < 0)
return -1;
- if (qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
return 0;
@@ -6093,21 +6093,21 @@ qemuMigrationSrcCancel(virQEMUDriver *driver,
static int
qemuMigrationJobStart(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob job,
+ virDomainAsyncJob job,
unsigned long apiFlags)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobOperation op;
unsigned long long mask;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ if (job == VIR_ASYNC_JOB_MIGRATION_IN) {
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
- mask = QEMU_JOB_NONE;
+ mask = VIR_JOB_NONE;
} else {
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
mask = QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MIGRATION_OP);
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MIGRATION_OP);
}
if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
@@ -6151,14 +6151,14 @@ qemuMigrationJobContinue(virDomainObj *vm)
static bool
qemuMigrationJobIsActive(virDomainObj *vm,
- qemuDomainAsyncJob job)
+ virDomainAsyncJob job)
{
qemuDomainObjPrivate *priv = vm->privateData;
if (priv->job.asyncJob != job) {
const char *msg;
- if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (job == VIR_ASYNC_JOB_MIGRATION_IN)
msg = _("domain '%s' is not processing incoming
migration");
else
msg = _("domain '%s' is not being migrated");
@@ -6231,7 +6231,7 @@ qemuMigrationDstErrorReport(virQEMUDriver *driver,
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData)
{
size_t i;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 6b169f73c7..a8afa66119 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -210,7 +210,7 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
virDomainObj *vm,
int fd,
virCommand *compressor,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
int
@@ -220,7 +220,7 @@ qemuMigrationSrcCancel(virQEMUDriver *driver,
int
qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData,
char **error);
@@ -248,7 +248,7 @@ int
qemuMigrationDstRun(virQEMUDriver *driver,
virDomainObj *vm,
const char *uri,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
void
qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
@@ -257,5 +257,5 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriver *driver,
int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainJobData *jobData);
diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c
index 39f84983bc..df2384b213 100644
--- a/src/qemu/qemu_migration_params.c
+++ b/src/qemu/qemu_migration_params.c
@@ -850,7 +850,7 @@ qemuMigrationParamsApply(virQEMUDriver *driver,
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
- if (asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob == VIR_ASYNC_JOB_NONE) {
if (!virBitmapIsAllClear(migParams->caps)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Migration capabilities can only be set by "
@@ -1165,7 +1165,7 @@ qemuMigrationParamsCheck(virQEMUDriver *driver,
qemuMigrationParty party;
size_t i;
- if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
+ if (asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
party = QEMU_MIGRATION_SOURCE;
else
party = QEMU_MIGRATION_DESTINATION;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 1ed60917ea..189e4671d1 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -462,7 +462,7 @@ qemuProcessFakeReboot(void *opaque)
VIR_DEBUG("vm=%p", vm);
virObjectLock(vm);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -484,7 +484,7 @@ qemuProcessFakeReboot(void *opaque)
if (qemuProcessStartCPUs(driver, vm,
reason,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
if (virGetLastErrorCode() == VIR_ERR_OK)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("resume operation failed"));
@@ -650,7 +650,7 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
* reveal it in domain state nor sent events */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) {
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else
@@ -1525,7 +1525,7 @@ qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
priv = vm->privateData;
jobPriv = priv->job.privateData;
- if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
+ if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration
job");
goto cleanup;
}
@@ -1557,7 +1557,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
qemuMonitorMigrationStatusTypeToString(status));
priv = vm->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION event without a migration job");
goto cleanup;
}
@@ -1568,7 +1568,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
virDomainObjBroadcast(vm);
if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
- priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
+ priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_MIGRATION) {
VIR_DEBUG("Correcting paused state reason for domain %s to %s",
@@ -1603,7 +1603,7 @@ qemuProcessHandleMigrationPass(qemuMonitor *mon G_GNUC_UNUSED,
vm, vm->def->name, pass);
priv = vm->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION_PASS event without a migration job");
goto cleanup;
}
@@ -1636,7 +1636,7 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
priv = vm->privateData;
jobPriv = priv->job.privateData;
privJobCurrent = priv->job.current->privateData;
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup;
}
@@ -1897,7 +1897,7 @@ qemuProcessMonitorLogFree(void *opaque)
static int
qemuProcessInitMonitor(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret;
@@ -2190,7 +2190,7 @@ qemuProcessRefreshChannelVirtioState(virQEMUDriver *driver,
int
qemuRefreshVirtioChannelState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(GHashTable) info = NULL;
@@ -2546,7 +2546,7 @@ qemuProcessInitCpuAffinity(virDomainObj *vm G_GNUC_UNUSED)
static int
qemuProcessSetLinkStates(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainDef *def = vm->def;
@@ -3210,7 +3210,7 @@ qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
int
qemuProcessStartCPUs(virQEMUDriver *driver, virDomainObj *vm,
virDomainRunningReason reason,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivate *priv = vm->privateData;
@@ -3261,7 +3261,7 @@ qemuProcessStartCPUs(virQEMUDriver *driver, virDomainObj *vm,
int qemuProcessStopCPUs(virQEMUDriver *driver,
virDomainObj *vm,
virDomainPausedReason reason,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivate *priv = vm->privateData;
@@ -3471,7 +3471,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriver *driver,
vm->def->name);
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
break;
@@ -3489,7 +3489,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriver *driver,
break;
}
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
@@ -3579,13 +3579,13 @@ qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain %s", vm->def->name);
}
}
}
- qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
+ qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_NONE,
jobPriv->migParams, job->apiFlags);
return 0;
}
@@ -3604,21 +3604,21 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
state = virDomainObjGetState(vm, &reason);
switch (job->asyncJob) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case VIR_ASYNC_JOB_MIGRATION_OUT:
if (qemuProcessRecoverMigrationOut(driver, vm, job,
state, reason, stopFlags) < 0)
return -1;
break;
- case QEMU_ASYNC_JOB_MIGRATION_IN:
+ case VIR_ASYNC_JOB_MIGRATION_IN:
if (qemuProcessRecoverMigrationIn(driver, vm, job,
state, reason) < 0)
return -1;
break;
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
+ case VIR_ASYNC_JOB_SAVE:
+ case VIR_ASYNC_JOB_DUMP:
+ case VIR_ASYNC_JOB_SNAPSHOT:
qemuDomainObjEnterMonitor(driver, vm);
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(vm);
@@ -3627,39 +3627,39 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
* recovering an async job, this function is run at startup
* and must resume things using sync monitor connections. */
if (state == VIR_DOMAIN_PAUSED &&
- ((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
+ ((job->asyncJob == VIR_ASYNC_JOB_DUMP &&
reason == VIR_DOMAIN_PAUSED_DUMP) ||
- (job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
+ (job->asyncJob == VIR_ASYNC_JOB_SAVE &&
reason == VIR_DOMAIN_PAUSED_SAVE) ||
- (job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT &&
+ (job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT &&
(reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
if (qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
- QEMU_ASYNC_JOB_NONE) < 0) {
+ VIR_ASYNC_JOB_NONE) < 0) {
VIR_WARN("Could not resume domain '%s' after migration to
file",
vm->def->name);
}
}
break;
- case QEMU_ASYNC_JOB_START:
+ case VIR_ASYNC_JOB_START:
/* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */
break;
- case QEMU_ASYNC_JOB_BACKUP:
+ case VIR_ASYNC_JOB_BACKUP:
ignore_value(virTimeMillisNow(&now));
/* Restore the config of the async job which is not persisted */
priv->job.jobsQueued++;
- priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP;
+ priv->job.asyncJob = VIR_ASYNC_JOB_BACKUP;
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
priv->job.asyncStarted = now;
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MODIFY)));
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MODIFY)));
/* We reset the job parameters for backup so that the job will look
* active. This is possible because we are able to recover the state
@@ -3673,8 +3673,8 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
priv->job.current->started = now;
break;
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_LAST:
+ case VIR_ASYNC_JOB_NONE:
+ case VIR_ASYNC_JOB_LAST:
break;
}
@@ -3686,32 +3686,32 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
* for the job to be properly tracked in domain state XML.
*/
switch (job->active) {
- case QEMU_JOB_QUERY:
+ case VIR_JOB_QUERY:
/* harmless */
break;
- case QEMU_JOB_DESTROY:
+ case VIR_JOB_DESTROY:
VIR_DEBUG("Domain %s should have already been destroyed",
vm->def->name);
return -1;
- case QEMU_JOB_SUSPEND:
+ case VIR_JOB_SUSPEND:
/* mostly harmless */
break;
- case QEMU_JOB_MODIFY:
+ case VIR_JOB_MODIFY:
/* XXX depending on the command we may be in an inconsistent state and
* we should probably fall back to "monitor error" state and refuse to
*/
break;
- case QEMU_JOB_MIGRATION_OP:
- case QEMU_JOB_ABORT:
- case QEMU_JOB_ASYNC:
- case QEMU_JOB_ASYNC_NESTED:
+ case VIR_JOB_MIGRATION_OP:
+ case VIR_JOB_ABORT:
+ case VIR_JOB_ASYNC:
+ case VIR_JOB_ASYNC_NESTED:
/* async job was already handled above */
- case QEMU_JOB_NONE:
- case QEMU_JOB_LAST:
+ case VIR_JOB_NONE:
+ case VIR_JOB_LAST:
break;
}
@@ -3727,7 +3727,7 @@ qemuProcessUpdateDevices(virQEMUDriver *driver,
g_auto(GStrv) old = g_steal_pointer(&priv->qemuDevices);
GStrv tmp;
- if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainUpdateDeviceList(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
if (!old)
@@ -4250,7 +4250,7 @@ qemuProcessGetVCPUQOMPath(virDomainObj *vm)
static int
qemuProcessFetchGuestCPU(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virCPUData **enabled,
virCPUData **disabled)
{
@@ -4358,7 +4358,7 @@ qemuProcessUpdateLiveGuestCPU(virDomainObj *vm,
static int
qemuProcessUpdateAndVerifyCPU(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virCPUData) cpu = NULL;
g_autoptr(virCPUData) disabled = NULL;
@@ -4379,7 +4379,7 @@ qemuProcessUpdateAndVerifyCPU(virQEMUDriver *driver,
static int
qemuProcessFetchCPUDefinitions(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
virDomainCapsCPUModels **cpuModels)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -4403,7 +4403,7 @@ qemuProcessFetchCPUDefinitions(virQEMUDriver *driver,
static int
qemuProcessUpdateCPU(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virCPUData) cpu = NULL;
g_autoptr(virCPUData) disabled = NULL;
@@ -4613,9 +4613,9 @@ qemuProcessIncomingDefNew(virQEMUCaps *qemuCaps,
/*
- * This function starts a new QEMU_ASYNC_JOB_START async job. The user is
+ * This function starts a new VIR_ASYNC_JOB_START async job. The user is
* responsible for calling qemuProcessEndJob to stop this job and for passing
- * QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this
+ * VIR_ASYNC_JOB_START as @asyncJob argument to any function requiring this
* parameter between qemuProcessBeginJob and qemuProcessEndJob.
*/
int
@@ -4624,11 +4624,11 @@ qemuProcessBeginJob(virQEMUDriver *driver,
virDomainJobOperation operation,
unsigned long apiFlags)
{
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_START,
operation, apiFlags) < 0)
return -1;
- qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+ qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
return 0;
}
@@ -5083,7 +5083,7 @@ qemuProcessSetupRawIO(virDomainObj *vm,
static int
qemuProcessSetupBalloon(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
unsigned long long balloon = vm->def->mem.cur_balloon;
qemuDomainObjPrivate *priv = vm->privateData;
@@ -5561,7 +5561,7 @@ int
qemuProcessInit(virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool migration,
unsigned int flags)
{
@@ -5952,7 +5952,7 @@ qemuProcessVcpusSortOrder(const void *a,
static int
qemuProcessSetupHotpluggableVcpus(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
qemuDomainObjPrivate *priv = vm->privateData;
@@ -7124,7 +7124,7 @@ qemuProcessGenID(virDomainObj *vm,
static int
qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
size_t i;
@@ -7201,7 +7201,7 @@ qemuProcessEnablePerf(virDomainObj *vm)
static int
qemuProcessSetupDisksTransientSnapshot(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
g_autoptr(GHashTable) blockNamedNodeData = NULL;
@@ -7252,7 +7252,7 @@ qemuProcessSetupDisksTransientSnapshot(virDomainObj *vm,
static int
qemuProcessSetupDisksTransientHotplug(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool hasHotpluggedDisk = false;
@@ -7292,7 +7292,7 @@ qemuProcessSetupDisksTransientHotplug(virDomainObj *vm,
static int
qemuProcessSetupDisksTransient(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -7311,7 +7311,7 @@ qemuProcessSetupDisksTransient(virDomainObj *vm,
static int
qemuProcessSetupLifecycleActions(virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int rc;
@@ -7358,7 +7358,7 @@ int
qemuProcessLaunch(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuProcessIncomingDef *incoming,
virDomainMomentObj *snapshot,
virNetDevVPortProfileOp vmop,
@@ -7721,7 +7721,7 @@ qemuProcessLaunch(virConnectPtr conn,
int
qemuProcessRefreshState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -7756,7 +7756,7 @@ qemuProcessRefreshState(virQEMUDriver *driver,
int
qemuProcessFinishStartup(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool startCPUs,
virDomainPausedReason pausedReason)
{
@@ -7794,7 +7794,7 @@ qemuProcessStart(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *migrateFrom,
int migrateFd,
const char *migratePath,
@@ -7814,7 +7814,7 @@ qemuProcessStart(virConnectPtr conn,
"migrateFrom=%s migrateFd=%d migratePath=%s "
"snapshot=%p vmop=%d flags=0x%x",
conn, driver, vm, vm->def->name, vm->def->id,
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath),
snapshot, vmop, flags);
@@ -7922,7 +7922,7 @@ qemuProcessCreatePretendCmdPrepare(virQEMUDriver *driver,
if (!migrateURI)
flags |= VIR_QEMU_PROCESS_START_NEW;
- if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE,
+ if (qemuProcessInit(driver, vm, NULL, VIR_ASYNC_JOB_NONE,
!!migrateURI, flags) < 0)
return -1;
@@ -7993,7 +7993,7 @@ qemuProcessKill(virDomainObj *vm, unsigned int flags)
int
qemuProcessBeginStopJob(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainJob job,
+ virDomainJob job,
bool forceKill)
{
qemuDomainObjPrivate *priv = vm->privateData;
@@ -8026,7 +8026,7 @@ qemuProcessBeginStopJob(virQEMUDriver *driver,
void qemuProcessStop(virQEMUDriver *driver,
virDomainObj *vm,
virDomainShutoffReason reason,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned int flags)
{
int ret;
@@ -8045,21 +8045,21 @@ void qemuProcessStop(virQEMUDriver *driver,
vm, vm->def->name, vm->def->id,
(long long)vm->pid,
virDomainShutoffReasonTypeToString(reason),
- qemuDomainAsyncJobTypeToString(asyncJob),
+ virDomainAsyncJobTypeToString(asyncJob),
flags);
/* This method is routinely used in clean up paths. Disable error
* reporting so we don't squash a legit error. */
virErrorPreserveLast(&orig_err);
- if (asyncJob != QEMU_ASYNC_JOB_NONE) {
+ if (asyncJob != VIR_ASYNC_JOB_NONE) {
if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0)
goto cleanup;
- } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
+ } else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE &&
priv->job.asyncOwner == virThreadSelfID() &&
- priv->job.active != QEMU_JOB_ASYNC_NESTED) {
+ priv->job.active != VIR_JOB_ASYNC_NESTED) {
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob));
+ virDomainAsyncJobTypeToString(asyncJob));
}
if (!virDomainObjIsActive(vm)) {
@@ -8368,7 +8368,7 @@ void qemuProcessStop(virQEMUDriver *driver,
virDomainObjRemoveTransientDef(vm);
endjob:
- if (asyncJob != QEMU_ASYNC_JOB_NONE)
+ if (asyncJob != VIR_ASYNC_JOB_NONE)
qemuDomainObjEndJob(vm);
cleanup:
@@ -8388,7 +8388,7 @@ qemuProcessAutoDestroy(virDomainObj *dom,
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
if (priv->job.asyncJob) {
@@ -8399,11 +8399,11 @@ qemuProcessAutoDestroy(virDomainObj *dom,
VIR_DEBUG("Killing domain");
- if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
+ if (qemuProcessBeginStopJob(driver, dom, VIR_JOB_DESTROY, true) < 0)
return;
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
- QEMU_ASYNC_JOB_NONE, stopFlags);
+ VIR_ASYNC_JOB_NONE, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
@@ -8447,7 +8447,7 @@ bool qemuProcessAutoDestroyActive(virQEMUDriver *driver,
int
qemuProcessRefreshDisks(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
@@ -8498,7 +8498,7 @@ qemuProcessRefreshDisks(virQEMUDriver *driver,
static int
qemuProcessRefreshCPUMigratability(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainDef *def = vm->def;
@@ -8559,7 +8559,7 @@ qemuProcessRefreshCPU(virQEMUDriver *driver,
if (!vm->def->cpu)
return 0;
- if (qemuProcessRefreshCPUMigratability(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshCPUMigratability(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
if (!(host = virQEMUDriverGetHostCPU(driver))) {
@@ -8594,7 +8594,7 @@ qemuProcessRefreshCPU(virQEMUDriver *driver,
if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0)
return -1;
- if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessUpdateCPU(driver, vm, VIR_ASYNC_JOB_NONE) < 0)
return -1;
} else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) {
/* We only try to fix CPUs when the libvirt/QEMU combo used to start
@@ -8755,12 +8755,12 @@ qemuProcessReconnect(void *opaque)
priv = obj->privateData;
qemuDomainObjRestoreJob(obj, &oldjob);
- if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
- if (oldjob.asyncJob == QEMU_ASYNC_JOB_BACKUP && priv->backup)
+ if (oldjob.asyncJob == VIR_ASYNC_JOB_BACKUP && priv->backup)
priv->backup->apiFlags = oldjob.apiFlags;
- if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, obj, VIR_JOB_MODIFY) < 0)
goto error;
jobStarted = true;
@@ -8792,7 +8792,7 @@ qemuProcessReconnect(void *opaque)
tryMonReconn = true;
/* XXX check PID liveliness & EXE path */
- if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, retry, NULL) < 0)
+ if (qemuConnectMonitor(driver, obj, VIR_ASYNC_JOB_NONE, retry, NULL) < 0)
goto error;
priv->machineName = qemuDomainGetMachineName(obj);
@@ -8887,7 +8887,7 @@ qemuProcessReconnect(void *opaque)
ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
obj->def));
- if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
+ if (qemuDomainRefreshVcpuInfo(driver, obj, VIR_ASYNC_JOB_NONE, true) < 0)
goto error;
qemuDomainVcpuPersistOrder(obj->def);
@@ -8895,10 +8895,10 @@ qemuProcessReconnect(void *opaque)
if (qemuProcessRefreshCPU(driver, obj) < 0)
goto error;
- if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuProcessDetectIOThreadPIDs(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessDetectIOThreadPIDs(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
@@ -8908,7 +8908,7 @@ qemuProcessReconnect(void *opaque)
qemuProcessFiltersInstantiate(obj->def);
- if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshDisks(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
/* At this point we've already checked that the startup of the VM was
@@ -8922,16 +8922,16 @@ qemuProcessReconnect(void *opaque)
}
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
- qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ qemuBlockNodeNamesDetect(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuRefreshVirtioChannelState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
/* If querying of guest's RTC failed, report error, but do not kill the domain.
*/
qemuRefreshRTC(driver, obj);
- if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
+ if (qemuProcessRefreshBalloonState(driver, obj, VIR_ASYNC_JOB_NONE) < 0)
goto error;
if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
@@ -9030,7 +9030,7 @@ qemuProcessReconnect(void *opaque)
* thread didn't have a chance to start playing with the domain yet
* (it's all we can do anyway).
*/
- qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
+ qemuProcessStop(driver, obj, state, VIR_ASYNC_JOB_NONE, stopFlags);
}
goto cleanup;
}
@@ -9072,7 +9072,7 @@ qemuProcessReconnectHelper(virDomainObj *obj,
* object.
*/
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
- QEMU_ASYNC_JOB_NONE, 0);
+ VIR_ASYNC_JOB_NONE, 0);
qemuDomainRemoveInactiveJobLocked(src->driver, obj);
virDomainObjEndAPI(&obj);
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 7e6f9f20e5..85c197714a 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -32,11 +32,11 @@ int qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
int qemuProcessStartCPUs(virQEMUDriver *driver,
virDomainObj *vm,
virDomainRunningReason reason,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessStopCPUs(virQEMUDriver *driver,
virDomainObj *vm,
virDomainPausedReason reason,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessBuildDestroyMemoryPaths(virQEMUDriver *driver,
virDomainObj *vm,
@@ -85,7 +85,7 @@ int qemuProcessStart(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
const char *migrateFrom,
int stdin_fd,
const char *stdin_path,
@@ -107,7 +107,7 @@ virCommand *qemuProcessCreatePretendCmdBuild(virQEMUDriver *driver,
int qemuProcessInit(virQEMUDriver *driver,
virDomainObj *vm,
virCPUDef *updatedCPU,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool migration,
unsigned int flags);
@@ -132,7 +132,7 @@ int qemuProcessPrepareHost(virQEMUDriver *driver,
int qemuProcessLaunch(virConnectPtr conn,
virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
qemuProcessIncomingDef *incoming,
virDomainMomentObj *snapshot,
virNetDevVPortProfileOp vmop,
@@ -140,13 +140,13 @@ int qemuProcessLaunch(virConnectPtr conn,
int qemuProcessFinishStartup(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
bool startCPUs,
virDomainPausedReason pausedReason);
int qemuProcessRefreshState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
typedef enum {
VIR_QEMU_PROCESS_STOP_MIGRATED = 1 << 0,
@@ -155,12 +155,12 @@ typedef enum {
int qemuProcessBeginStopJob(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainJob job,
+ virDomainJob job,
bool forceKill);
void qemuProcessStop(virQEMUDriver *driver,
virDomainObj *vm,
virDomainShutoffReason reason,
- qemuDomainAsyncJob asyncJob,
+ virDomainAsyncJob asyncJob,
unsigned int flags);
typedef enum {
@@ -200,7 +200,7 @@ int qemuProcessSetupIOThread(virDomainObj *vm,
int qemuRefreshVirtioChannelState(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessRefreshBalloonState(virQEMUDriver *driver,
virDomainObj *vm,
@@ -208,7 +208,7 @@ int qemuProcessRefreshBalloonState(virQEMUDriver *driver,
int qemuProcessRefreshDisks(virQEMUDriver *driver,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int qemuProcessStartManagedPRDaemon(virDomainObj *vm) G_GNUC_NO_INLINE;
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index c0139041eb..4fd4c5cfcd 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -259,7 +259,7 @@ qemuSaveImageCreate(virQEMUDriver *driver,
virQEMUSaveData *data,
virCommand *compressor,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
bool needUnlink = false;
@@ -578,7 +578,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
const char *path,
bool start_paused,
bool reset_nvram,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
int ret = -1;
diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h
index a0daa4ad2b..391cd55ed0 100644
--- a/src/qemu/qemu_saveimage.h
+++ b/src/qemu/qemu_saveimage.h
@@ -68,7 +68,7 @@ qemuSaveImageStartVM(virConnectPtr conn,
const char *path,
bool start_paused,
bool reset_nvram,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6);
int
@@ -97,7 +97,7 @@ qemuSaveImageCreate(virQEMUDriver *driver,
virQEMUSaveData *data,
virCommand *compressor,
unsigned int flags,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
int
virQEMUSaveDataWrite(virQEMUSaveData *data,
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 5333730df1..185fcb04a2 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -304,7 +304,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
* domain. Thus we stop and start CPUs ourselves.
*/
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;
resume = true;
@@ -316,7 +316,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
}
if (qemuDomainObjEnterMonitorAsync(driver, vm,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
resume = false;
goto cleanup;
}
@@ -333,7 +333,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT, 0);
+ VIR_ASYNC_JOB_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
resume = false;
}
@@ -342,7 +342,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriver *driver,
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -863,7 +863,7 @@ static void
qemuSnapshotDiskCleanup(qemuSnapshotDiskData *data,
size_t ndata,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
@@ -922,7 +922,7 @@ struct _qemuSnapshotDiskContext {
/* needed for automatic cleanup of 'dd' */
virDomainObj *vm;
- qemuDomainAsyncJob asyncJob;
+ virDomainAsyncJob asyncJob;
};
typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;
@@ -931,7 +931,7 @@ typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;
qemuSnapshotDiskContext *
qemuSnapshotDiskContextNew(size_t ndisks,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virQEMUDriver *driver = priv->driver;
@@ -1008,7 +1008,7 @@ qemuSnapshotDiskPrepareOneBlockdev(virQEMUDriver *driver,
virQEMUDriverConfig *cfg,
bool reuse,
GHashTable *blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virStorageSource) terminator = NULL;
@@ -1165,7 +1165,7 @@ qemuSnapshotDiskPrepareActiveExternal(virDomainObj *vm,
virDomainMomentObj *snap,
bool reuse,
GHashTable *blockNamedNodeData,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
size_t i;
@@ -1319,7 +1319,7 @@ qemuSnapshotCreateActiveExternalDisks(virDomainObj *vm,
virDomainMomentObj *snap,
GHashTable *blockNamedNodeData,
unsigned int flags,
- qemuDomainAsyncJob asyncJob)
+ virDomainAsyncJob asyncJob)
{
bool reuse = (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) != 0;
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
@@ -1371,7 +1371,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) {
int frozen;
- if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) < 0)
goto cleanup;
if (virDomainObjCheckActive(vm) < 0) {
@@ -1405,7 +1405,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
* when the user wants to manually snapshot some disks */
if (((memory || has_manual) && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_LIVE))) {
if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT) < 0)
goto cleanup;
if (!virDomainObjIsActive(vm)) {
@@ -1420,7 +1420,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
* migration step as qemu deactivates bitmaps after migration so the result
* would be wrong */
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
- !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_SNAPSHOT)))
+ !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_SNAPSHOT)))
goto cleanup;
/* do the memory snapshot if necessary */
@@ -1434,8 +1434,8 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
/* allow the migration job to be cancelled or the domain to be paused */
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
- JOB_MASK(QEMU_JOB_SUSPEND) |
- JOB_MASK(QEMU_JOB_MIGRATION_OP)));
+ JOB_MASK(VIR_JOB_SUSPEND) |
+ JOB_MASK(VIR_JOB_MIGRATION_OP)));
if ((compressed =
qemuSaveImageGetCompressionProgram(cfg->snapshotImageFormat,
&compressor,
@@ -1458,7 +1458,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
if ((ret = qemuSaveImageCreate(driver, vm, snapdef->memorysnapshotfile,
data, compressor, 0,
- QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT)) < 0)
goto cleanup;
/* the memory image was created, remove it on errors */
@@ -1473,7 +1473,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
if ((ret = qemuSnapshotCreateActiveExternalDisks(vm, snap,
blockNamedNodeData, flags,
- QEMU_ASYNC_JOB_SNAPSHOT)) < 0)
+ VIR_ASYNC_JOB_SNAPSHOT)) < 0)
goto cleanup;
/* the snapshot is complete now */
@@ -1481,7 +1481,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_SNAPSHOT, 0);
+ VIR_ASYNC_JOB_SNAPSHOT, 0);
virDomainAuditStop(vm, "from-snapshot");
resume = false;
thaw = false;
@@ -1503,7 +1503,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
if (resume && virDomainObjIsActive(vm) &&
qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_UNPAUSED,
- QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
+ VIR_ASYNC_JOB_SNAPSHOT) < 0) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_SUSPENDED,
VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
@@ -1517,7 +1517,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
}
if (thaw &&
- qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) >= 0 &&
+ qemuDomainObjBeginAgentJob(driver, vm, VIR_AGENT_JOB_MODIFY) >= 0 &&
virDomainObjIsActive(vm)) {
/* report error only on an otherwise successful snapshot */
if (qemuSnapshotFSThaw(vm, ret == 0) < 0)
@@ -1889,11 +1889,11 @@ qemuSnapshotCreateXML(virDomainPtr domain,
* a regular job, so we need to set the job mask to disallow query as
* 'savevm' blocks the monitor. External snapshot will then modify the
* job mask appropriately. */
- if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
+ if (qemuDomainObjBeginAsyncJob(driver, vm, VIR_ASYNC_JOB_SNAPSHOT,
VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
return NULL;
- qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+ qemuDomainObjSetAsyncJobMask(vm, VIR_JOB_NONE);
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE) {
snapshot = qemuSnapshotRedefine(vm, domain, def, driver, cfg, flags);
@@ -2067,7 +2067,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,
/* Transitions 5, 6, 8, 9 */
qemuProcessStop(driver, vm,
VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START, 0);
+ VIR_ASYNC_JOB_START, 0);
virDomainAuditStop(vm, "from-snapshot");
detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
event = virDomainEventLifecycleNewFromObj(vm,
@@ -2092,7 +2092,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,
rc = qemuProcessStart(snapshot->domain->conn, driver, vm,
cookie ? cookie->cpu : NULL,
- QEMU_ASYNC_JOB_START, NULL, -1, NULL, snap,
+ VIR_ASYNC_JOB_START, NULL, -1, NULL, snap,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags);
virDomainAuditStart(vm, "from-snapshot", rc >= 0);
@@ -2125,7 +2125,7 @@ qemuSnapshotRevertActive(virDomainObj *vm,
}
rc = qemuProcessStartCPUs(driver, vm,
VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START);
+ VIR_ASYNC_JOB_START);
if (rc < 0)
return -1;
}
@@ -2188,7 +2188,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm,
if (virDomainObjIsActive(vm)) {
/* Transitions 4, 7 */
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT,
- QEMU_ASYNC_JOB_START, 0);
+ VIR_ASYNC_JOB_START, 0);
virDomainAuditStop(vm, "from-snapshot");
detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
event = virDomainEventLifecycleNewFromObj(vm,
@@ -2215,7 +2215,7 @@ qemuSnapshotRevertInactive(virDomainObj *vm,
start_flags |= paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL,
- QEMU_ASYNC_JOB_START, NULL, -1, NULL, NULL,
+ VIR_ASYNC_JOB_START, NULL, -1, NULL, NULL,
VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
start_flags);
virDomainAuditStart(vm, "from-snapshot", rc >= 0);
@@ -2394,7 +2394,7 @@ qemuSnapshotDelete(virDomainObj *vm,
VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY |
VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1);
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, VIR_JOB_MODIFY) < 0)
return -1;
if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
diff --git a/src/qemu/qemu_snapshot.h b/src/qemu/qemu_snapshot.h
index ad2bdb1114..0cc38c0039 100644
--- a/src/qemu/qemu_snapshot.h
+++ b/src/qemu/qemu_snapshot.h
@@ -61,7 +61,7 @@ typedef struct _qemuSnapshotDiskContext qemuSnapshotDiskContext;
qemuSnapshotDiskContext *
qemuSnapshotDiskContextNew(size_t ndisks,
virDomainObj *vm,
- qemuDomainAsyncJob asyncJob);
+ virDomainAsyncJob asyncJob);
void
qemuSnapshotDiskContextCleanup(qemuSnapshotDiskContext *snapctxt);
--
2.35.1
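
For context on the qemuDomainObjSetAsyncJobMask() hunks above: each
virDomainJob value owns one bit in a per-domain mask, and a synchronous
job may start while an async job (here VIR_ASYNC_JOB_SNAPSHOT) is
running only if its bit is set. The memory-snapshot path widens the
default mask with JOB_MASK(VIR_JOB_SUSPEND) and
JOB_MASK(VIR_JOB_MIGRATION_OP) so the operation can be paused or
cancelled, while qemuSnapshotCreateXML() narrows it to VIR_JOB_NONE
because 'savevm' blocks the monitor. Below is a minimal standalone
sketch of that gating, not libvirt code: the enum subset and JOB_MASK
are paraphrased from domain_job.h / qemu_domainjob.h, and jobAllowed()
is a hypothetical helper used only for illustration.

/*
 * Simplified sketch of the job-mask gating touched by this patch.
 * Paraphrased, not copied: real definitions live in
 * src/hypervisor/domain_job.h and src/qemu/qemu_domainjob.h.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum {
    VIR_JOB_NONE = 0,       /* owns no bit in the mask */
    VIR_JOB_QUERY,
    VIR_JOB_DESTROY,
    VIR_JOB_SUSPEND,
    VIR_JOB_MODIFY,
    VIR_JOB_ABORT,
    VIR_JOB_MIGRATION_OP,
} virDomainJob;

/* each job type owns one bit; VIR_JOB_NONE maps to an empty mask */
#define JOB_MASK(job) ((job) == 0 ? 0 : 1 << ((job) - 1))

/* hypothetical helper: a sync job may nest inside the running async
 * job only if its bit is present in the current mask */
static bool
jobAllowed(unsigned int mask, virDomainJob job)
{
    return (mask & JOB_MASK(job)) != 0;
}

int
main(void)
{
    /* roughly the mask qemuSnapshotCreateActiveExternal() sets while
     * saving memory: the defaults (query/destroy/abort at the time of
     * this series) plus suspend and migration-op, so the snapshot can
     * be paused or its migration phase cancelled */
    unsigned int mask = JOB_MASK(VIR_JOB_QUERY) |
                        JOB_MASK(VIR_JOB_DESTROY) |
                        JOB_MASK(VIR_JOB_ABORT) |
                        JOB_MASK(VIR_JOB_SUSPEND) |
                        JOB_MASK(VIR_JOB_MIGRATION_OP);

    printf("suspend allowed: %d\n", jobAllowed(mask, VIR_JOB_SUSPEND)); /* 1 */
    printf("modify allowed:  %d\n", jobAllowed(mask, VIR_JOB_MODIFY));  /* 0 */

    /* qemuSnapshotCreateXML() instead passes VIR_JOB_NONE (an empty
     * mask), so even queries must wait while 'savevm' runs */
    printf("query allowed:   %d\n", jobAllowed(0, VIR_JOB_QUERY));      /* 0 */
    return 0;
}

Under this scheme the rename in this patch is purely mechanical: the
bit layout and the gating logic are untouched, only the enum prefixes
move from QEMU_* to the generalized VIR_* names.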