All the domain job related APIs were present in `qemu_domain.c`
along with the other domain APIs. In this patch, we move all the
qemu domain job APIs into a separate file.
Also, in this process, `qemuDomainTrackJob()`,
`qemuDomainFreeJob()`, `qemuDomainInitJob()` and
`qemuDomainObjSaveStatus()` were converted to non-static
functions and exposed using `qemu_domain.h`.
Signed-off-by: Prathamesh Chavan <pc44800(a)gmail.com>
---
po/POTFILES.in | 1 +
src/qemu/Makefile.inc.am | 2 +
src/qemu/qemu_domain.c | 1162 +-----------------------------------
src/qemu/qemu_domain.h | 247 +-------
src/qemu/qemu_domainjob.c | 1192 +++++++++++++++++++++++++++++++++++++
src/qemu/qemu_domainjob.h | 269 +++++++++
6 files changed, 1470 insertions(+), 1403 deletions(-)
create mode 100644 src/qemu/qemu_domainjob.c
create mode 100644 src/qemu/qemu_domainjob.h
diff --git a/po/POTFILES.in b/po/POTFILES.in
index 6607e298f2..af52054aa4 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -152,6 +152,7 @@
@SRCDIR(a)/src/qemu/qemu_conf.c
@SRCDIR(a)/src/qemu/qemu_dbus.c
@SRCDIR(a)/src/qemu/qemu_domain.c
+@SRCDIR(a)/src/qemu/qemu_domainjob.c
@SRCDIR(a)/src/qemu/qemu_domain_address.c
@SRCDIR(a)/src/qemu/qemu_driver.c
@SRCDIR(a)/src/qemu/qemu_extdevice.c
diff --git a/src/qemu/Makefile.inc.am b/src/qemu/Makefile.inc.am
index 6a7fc0822b..f83a675ba2 100644
--- a/src/qemu/Makefile.inc.am
+++ b/src/qemu/Makefile.inc.am
@@ -17,6 +17,8 @@ QEMU_DRIVER_SOURCES = \
qemu/qemu_dbus.h \
qemu/qemu_domain.c \
qemu/qemu_domain.h \
+ qemu/qemu_domainjob.c \
+ qemu/qemu_domainjob.h \
qemu/qemu_domain_address.c \
qemu/qemu_domain_address.h \
qemu/qemu_cgroup.c \
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 1ddaa922c5..cb8d00c30b 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -83,44 +83,6 @@
VIR_LOG_INIT("qemu.qemu_domain");
-VIR_ENUM_IMPL(qemuDomainJob,
- QEMU_JOB_LAST,
- "none",
- "query",
- "destroy",
- "suspend",
- "modify",
- "abort",
- "migration operation",
- "none", /* async job is never stored in job.active */
- "async nested",
-);
-
-VIR_ENUM_IMPL(qemuDomainAgentJob,
- QEMU_AGENT_JOB_LAST,
- "none",
- "query",
- "modify",
-);
-
-VIR_ENUM_IMPL(qemuDomainAsyncJob,
- QEMU_ASYNC_JOB_LAST,
- "none",
- "migration out",
- "migration in",
- "save",
- "dump",
- "snapshot",
- "start",
- "backup",
-);
-
-VIR_ENUM_IMPL(qemuDomainNamespace,
- QEMU_DOMAIN_NS_LAST,
- "mount",
-);
-
-
/**
* qemuDomainObjFromDomain:
* @domain: Domain pointer that has to be looked up
@@ -204,58 +166,6 @@ qemuDomainLogContextFinalize(GObject *object)
G_OBJECT_CLASS(qemu_domain_log_context_parent_class)->finalize(object);
}
-const char *
-qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
- int phase G_GNUC_UNUSED)
-{
- switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
- return qemuMigrationJobPhaseTypeToString(phase);
-
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
- G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
- break;
- }
-
- return "none";
-}
-
-int
-qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
- const char *phase)
-{
- if (!phase)
- return 0;
-
- switch (job) {
- case QEMU_ASYNC_JOB_MIGRATION_OUT:
- case QEMU_ASYNC_JOB_MIGRATION_IN:
- return qemuMigrationJobPhaseTypeFromString(phase);
-
- case QEMU_ASYNC_JOB_SAVE:
- case QEMU_ASYNC_JOB_DUMP:
- case QEMU_ASYNC_JOB_SNAPSHOT:
- case QEMU_ASYNC_JOB_START:
- case QEMU_ASYNC_JOB_NONE:
- case QEMU_ASYNC_JOB_BACKUP:
- G_GNUC_FALLTHROUGH;
- case QEMU_ASYNC_JOB_LAST:
- break;
- }
-
- if (STREQ(phase, "none"))
- return 0;
- else
- return -1;
-}
-
bool
qemuDomainNamespaceEnabled(virDomainObjPtr vm,
@@ -304,573 +214,6 @@ qemuDomainDisableNamespace(virDomainObjPtr vm,
}
}
-
-void
-qemuDomainJobInfoFree(qemuDomainJobInfoPtr info)
-{
- g_free(info->errmsg);
- g_free(info);
-}
-
-
-qemuDomainJobInfoPtr
-qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info)
-{
- qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1);
-
- memcpy(ret, info, sizeof(*info));
-
- ret->errmsg = g_strdup(info->errmsg);
-
- return ret;
-}
-
-void
-qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
-{
- qemuDomainObjPrivatePtr priv = vm->privateData;
- virObjectEventPtr event;
- virTypedParameterPtr params = NULL;
- int nparams = 0;
- int type;
-
- if (!priv->job.completed)
- return;
-
-    if (qemuDomainJobInfoToParams(priv->job.completed, &type,
-                                  &params, &nparams) < 0) {
- VIR_WARN("Could not get stats for completed job; domain %s",
- vm->def->name);
- }
-
- event = virDomainEventJobCompletedNewFromObj(vm, params, nparams);
- virObjectEventStateQueue(driver->domainEventState, event);
-}
-
-
-static int
-qemuDomainObjInitJob(qemuDomainJobObjPtr job)
-{
- memset(job, 0, sizeof(*job));
-
- if (virCondInit(&job->cond) < 0)
- return -1;
-
- if (virCondInit(&job->asyncCond) < 0) {
- virCondDestroy(&job->cond);
- return -1;
- }
-
- return 0;
-}
-
-static void
-qemuDomainObjResetJob(qemuDomainJobObjPtr job)
-{
- job->active = QEMU_JOB_NONE;
- job->owner = 0;
- job->ownerAPI = NULL;
- job->started = 0;
-}
-
-
-static void
-qemuDomainObjResetAgentJob(qemuDomainJobObjPtr job)
-{
- job->agentActive = QEMU_AGENT_JOB_NONE;
- job->agentOwner = 0;
- job->agentOwnerAPI = NULL;
- job->agentStarted = 0;
-}
-
-
-static void
-qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job)
-{
- job->asyncJob = QEMU_ASYNC_JOB_NONE;
- job->asyncOwner = 0;
- job->asyncOwnerAPI = NULL;
- job->asyncStarted = 0;
- job->phase = 0;
- job->mask = QEMU_JOB_DEFAULT_MASK;
- job->abortJob = false;
- job->spiceMigration = false;
- job->spiceMigrated = false;
- job->dumpCompleted = false;
- VIR_FREE(job->error);
- g_clear_pointer(&job->current, qemuDomainJobInfoFree);
- qemuMigrationParamsFree(job->migParams);
- job->migParams = NULL;
- job->apiFlags = 0;
-}
-
-void
-qemuDomainObjRestoreJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr job)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- memset(job, 0, sizeof(*job));
- job->active = priv->job.active;
- job->owner = priv->job.owner;
- job->asyncJob = priv->job.asyncJob;
- job->asyncOwner = priv->job.asyncOwner;
- job->phase = priv->job.phase;
- job->migParams = g_steal_pointer(&priv->job.migParams);
- job->apiFlags = priv->job.apiFlags;
-
- qemuDomainObjResetJob(&priv->job);
- qemuDomainObjResetAsyncJob(&priv->job);
-}
-
-static void
-qemuDomainObjFreeJob(qemuDomainJobObjPtr job)
-{
- qemuDomainObjResetJob(job);
- qemuDomainObjResetAsyncJob(job);
- g_clear_pointer(&job->current, qemuDomainJobInfoFree);
- g_clear_pointer(&job->completed, qemuDomainJobInfoFree);
- virCondDestroy(&job->cond);
- virCondDestroy(&job->asyncCond);
-}
-
-static bool
-qemuDomainTrackJob(qemuDomainJob job)
-{
- return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
-}
-
-
-int
-qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
-{
- unsigned long long now;
-
- if (!jobInfo->started)
- return 0;
-
- if (virTimeMillisNow(&now) < 0)
- return -1;
-
- if (now < jobInfo->started) {
- VIR_WARN("Async job starts in the future");
- jobInfo->started = 0;
- return 0;
- }
-
- jobInfo->timeElapsed = now - jobInfo->started;
- return 0;
-}
-
-int
-qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
-{
- unsigned long long now;
-
- if (!jobInfo->stopped)
- return 0;
-
- if (virTimeMillisNow(&now) < 0)
- return -1;
-
- if (now < jobInfo->stopped) {
- VIR_WARN("Guest's CPUs stopped in the future");
- jobInfo->stopped = 0;
- return 0;
- }
-
- jobInfo->stats.mig.downtime = now - jobInfo->stopped;
- jobInfo->stats.mig.downtime_set = true;
- return 0;
-}
-
-static virDomainJobType
-qemuDomainJobStatusToType(qemuDomainJobStatus status)
-{
- switch (status) {
- case QEMU_DOMAIN_JOB_STATUS_NONE:
- break;
-
- case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
- case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
- case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
- case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
- case QEMU_DOMAIN_JOB_STATUS_PAUSED:
- return VIR_DOMAIN_JOB_UNBOUNDED;
-
- case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
- return VIR_DOMAIN_JOB_COMPLETED;
-
- case QEMU_DOMAIN_JOB_STATUS_FAILED:
- return VIR_DOMAIN_JOB_FAILED;
-
- case QEMU_DOMAIN_JOB_STATUS_CANCELED:
- return VIR_DOMAIN_JOB_CANCELLED;
- }
-
- return VIR_DOMAIN_JOB_NONE;
-}
-
-int
-qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
- virDomainJobInfoPtr info)
-{
- info->type = qemuDomainJobStatusToType(jobInfo->status);
- info->timeElapsed = jobInfo->timeElapsed;
-
- switch (jobInfo->statsType) {
- case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
- info->memTotal = jobInfo->stats.mig.ram_total;
- info->memRemaining = jobInfo->stats.mig.ram_remaining;
- info->memProcessed = jobInfo->stats.mig.ram_transferred;
- info->fileTotal = jobInfo->stats.mig.disk_total +
- jobInfo->mirrorStats.total;
- info->fileRemaining = jobInfo->stats.mig.disk_remaining +
- (jobInfo->mirrorStats.total -
- jobInfo->mirrorStats.transferred);
- info->fileProcessed = jobInfo->stats.mig.disk_transferred +
- jobInfo->mirrorStats.transferred;
- break;
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
- info->memTotal = jobInfo->stats.mig.ram_total;
- info->memRemaining = jobInfo->stats.mig.ram_remaining;
- info->memProcessed = jobInfo->stats.mig.ram_transferred;
- break;
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
- info->memTotal = jobInfo->stats.dump.total;
- info->memProcessed = jobInfo->stats.dump.completed;
- info->memRemaining = info->memTotal - info->memProcessed;
- break;
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
- info->fileTotal = jobInfo->stats.backup.total;
- info->fileProcessed = jobInfo->stats.backup.transferred;
- info->fileRemaining = info->fileTotal - info->fileProcessed;
- break;
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
- break;
- }
-
- info->dataTotal = info->memTotal + info->fileTotal;
- info->dataRemaining = info->memRemaining + info->fileRemaining;
- info->dataProcessed = info->memProcessed + info->fileProcessed;
-
- return 0;
-}
-
-
-static int
-qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
- int *type,
- virTypedParameterPtr *params,
- int *nparams)
-{
- qemuMonitorMigrationStats *stats = &jobInfo->stats.mig;
- qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats;
- virTypedParameterPtr par = NULL;
- int maxpar = 0;
- int npar = 0;
- unsigned long long mirrorRemaining = mirrorStats->total -
- mirrorStats->transferred;
-
- if (virTypedParamsAddInt(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_OPERATION,
- jobInfo->operation) < 0)
- goto error;
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_TIME_ELAPSED,
- jobInfo->timeElapsed) < 0)
- goto error;
-
- if (jobInfo->timeDeltaSet &&
- jobInfo->timeElapsed > jobInfo->timeDelta &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_TIME_ELAPSED_NET,
- jobInfo->timeElapsed - jobInfo->timeDelta) < 0)
- goto error;
-
- if (stats->downtime_set &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DOWNTIME,
- stats->downtime) < 0)
- goto error;
-
- if (stats->downtime_set &&
- jobInfo->timeDeltaSet &&
- stats->downtime > jobInfo->timeDelta &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DOWNTIME_NET,
- stats->downtime - jobInfo->timeDelta) < 0)
- goto error;
-
- if (stats->setup_time_set &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_SETUP_TIME,
- stats->setup_time) < 0)
- goto error;
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DATA_TOTAL,
- stats->ram_total +
- stats->disk_total +
- mirrorStats->total) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DATA_PROCESSED,
- stats->ram_transferred +
- stats->disk_transferred +
- mirrorStats->transferred) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DATA_REMAINING,
- stats->ram_remaining +
- stats->disk_remaining +
- mirrorRemaining) < 0)
- goto error;
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_TOTAL,
- stats->ram_total) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_PROCESSED,
- stats->ram_transferred) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_REMAINING,
- stats->ram_remaining) < 0)
- goto error;
-
- if (stats->ram_bps &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_BPS,
- stats->ram_bps) < 0)
- goto error;
-
- if (stats->ram_duplicate_set) {
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_CONSTANT,
- stats->ram_duplicate) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_NORMAL,
- stats->ram_normal) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES,
- stats->ram_normal_bytes) < 0)
- goto error;
- }
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
- stats->ram_dirty_rate) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_ITERATION,
- stats->ram_iteration) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_POSTCOPY_REQS,
- stats->ram_postcopy_reqs) < 0)
- goto error;
-
- if (stats->ram_page_size > 0 &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_PAGE_SIZE,
- stats->ram_page_size) < 0)
- goto error;
-
- /* The remaining stats are disk, mirror, or migration specific
- * so if this is a SAVEDUMP, we can just skip them */
- if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
- goto done;
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DISK_TOTAL,
- stats->disk_total +
- mirrorStats->total) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DISK_PROCESSED,
- stats->disk_transferred +
- mirrorStats->transferred) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DISK_REMAINING,
- stats->disk_remaining +
- mirrorRemaining) < 0)
- goto error;
-
- if (stats->disk_bps &&
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_DISK_BPS,
- stats->disk_bps) < 0)
- goto error;
-
- if (stats->xbzrle_set) {
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_COMPRESSION_CACHE,
- stats->xbzrle_cache_size) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_COMPRESSION_BYTES,
- stats->xbzrle_bytes) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_COMPRESSION_PAGES,
- stats->xbzrle_pages) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES,
- stats->xbzrle_cache_miss) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW,
- stats->xbzrle_overflow) < 0)
- goto error;
- }
-
- if (stats->cpu_throttle_percentage &&
- virTypedParamsAddInt(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE,
- stats->cpu_throttle_percentage) < 0)
- goto error;
-
- done:
- *type = qemuDomainJobStatusToType(jobInfo->status);
- *params = par;
- *nparams = npar;
- return 0;
-
- error:
- virTypedParamsFree(par, npar);
- return -1;
-}
-
-
-static int
-qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
- int *type,
- virTypedParameterPtr *params,
- int *nparams)
-{
- qemuMonitorDumpStats *stats = &jobInfo->stats.dump;
- virTypedParameterPtr par = NULL;
- int maxpar = 0;
- int npar = 0;
-
- if (virTypedParamsAddInt(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_OPERATION,
- jobInfo->operation) < 0)
- goto error;
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_TIME_ELAPSED,
- jobInfo->timeElapsed) < 0)
- goto error;
-
- if (virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_TOTAL,
- stats->total) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_PROCESSED,
- stats->completed) < 0 ||
- virTypedParamsAddULLong(&par, &npar, &maxpar,
- VIR_DOMAIN_JOB_MEMORY_REMAINING,
- stats->total - stats->completed) < 0)
- goto error;
-
- *type = qemuDomainJobStatusToType(jobInfo->status);
- *params = par;
- *nparams = npar;
- return 0;
-
- error:
- virTypedParamsFree(par, npar);
- return -1;
-}
-
-
-static int
-qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
- int *type,
- virTypedParameterPtr *params,
- int *nparams)
-{
- qemuDomainBackupStats *stats = &jobInfo->stats.backup;
- g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1);
-
- if (virTypedParamListAddInt(par, jobInfo->operation,
- VIR_DOMAIN_JOB_OPERATION) < 0)
- return -1;
-
- if (virTypedParamListAddULLong(par, jobInfo->timeElapsed,
- VIR_DOMAIN_JOB_TIME_ELAPSED) < 0)
- return -1;
-
- if (stats->transferred > 0 || stats->total > 0) {
- if (virTypedParamListAddULLong(par, stats->total,
- VIR_DOMAIN_JOB_DISK_TOTAL) < 0)
- return -1;
-
- if (virTypedParamListAddULLong(par, stats->transferred,
- VIR_DOMAIN_JOB_DISK_PROCESSED) < 0)
- return -1;
-
- if (virTypedParamListAddULLong(par, stats->total - stats->transferred,
- VIR_DOMAIN_JOB_DISK_REMAINING) < 0)
- return -1;
- }
-
- if (stats->tmp_used > 0 || stats->tmp_total > 0) {
- if (virTypedParamListAddULLong(par, stats->tmp_used,
- VIR_DOMAIN_JOB_DISK_TEMP_USED) < 0)
- return -1;
-
- if (virTypedParamListAddULLong(par, stats->tmp_total,
- VIR_DOMAIN_JOB_DISK_TEMP_TOTAL) < 0)
- return -1;
- }
-
-    if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
-        virTypedParamListAddBoolean(par,
-                                    jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED,
-                                    VIR_DOMAIN_JOB_SUCCESS) < 0)
-        return -1;
-
-    if (jobInfo->errmsg &&
-        virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0)
-        return -1;
-
- *nparams = virTypedParamListStealParams(par, params);
- *type = qemuDomainJobStatusToType(jobInfo->status);
- return 0;
-}
-
-
-int
-qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
- int *type,
- virTypedParameterPtr *params,
- int *nparams)
-{
- switch (jobInfo->statsType) {
- case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
- case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
- return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams);
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
- return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams);
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
- return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams);
-
- case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
- virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("invalid job statistics type"));
- break;
-
- default:
- virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType);
- break;
- }
-
- return -1;
-}
-
-
/* qemuDomainGetMasterKeyFilePath:
* @libDir: Directory path to domain lib files
*
@@ -6123,7 +5466,7 @@ virDomainDefParserConfig virQEMUDriverDomainDefParserConfig = {
};
-static void
+void
qemuDomainObjSaveStatus(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
@@ -6165,508 +5508,6 @@ qemuDomainSaveConfig(virDomainObjPtr obj)
}
-void
-qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- int phase)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
- unsigned long long me = virThreadSelfID();
-
- if (!priv->job.asyncJob)
- return;
-
- VIR_DEBUG("Setting '%s' phase to '%s'",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
-
- if (priv->job.asyncOwner && me != priv->job.asyncOwner) {
- VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- priv->job.asyncOwner);
- }
-
- priv->job.phase = phase;
- priv->job.asyncOwner = me;
- qemuDomainObjSaveStatus(driver, obj);
-}
-
-void
-qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
- unsigned long long allowedJobs)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- if (!priv->job.asyncJob)
- return;
-
- priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
-}
-
-void
-qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
- qemuDomainObjResetJob(&priv->job);
- qemuDomainObjResetAsyncJob(&priv->job);
- qemuDomainObjSaveStatus(driver, obj);
-}
-
-void
-qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- VIR_DEBUG("Releasing ownership of '%s' async job",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
-
- if (priv->job.asyncOwner != virThreadSelfID()) {
- VIR_WARN("'%s' async job is owned by thread %llu",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- priv->job.asyncOwner);
- }
- priv->job.asyncOwner = 0;
-}
-
-static bool
-qemuDomainNestedJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob)
-{
- return !jobs->asyncJob ||
- newJob == QEMU_JOB_NONE ||
- (jobs->mask & JOB_MASK(newJob)) != 0;
-}
-
-bool
-qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob)
-{
- return !jobs->active && qemuDomainNestedJobAllowed(jobs, newJob);
-}
-
-static bool
-qemuDomainObjCanSetJob(qemuDomainJobObjPtr job,
- qemuDomainJob newJob,
- qemuDomainAgentJob newAgentJob)
-{
- return ((newJob == QEMU_JOB_NONE ||
- job->active == QEMU_JOB_NONE) &&
- (newAgentJob == QEMU_AGENT_JOB_NONE ||
- job->agentActive == QEMU_AGENT_JOB_NONE));
-}
-
-/* Give up waiting for mutex after 30 seconds */
-#define QEMU_JOB_WAIT_TIME (1000ull * 30)
-
-/**
- * qemuDomainObjBeginJobInternal:
- * @driver: qemu driver
- * @obj: domain object
- * @job: qemuDomainJob to start
- * @asyncJob: qemuDomainAsyncJob to start
- * @nowait: don't wait trying to acquire @job
- *
- * Acquires job for a domain object which must be locked before
- * calling. If there's already a job running waits up to
- * QEMU_JOB_WAIT_TIME after which the functions fails reporting
- * an error unless @nowait is set.
- *
- * If @nowait is true this function tries to acquire job and if
- * it fails, then it returns immediately without waiting. No
- * error is reported in this case.
- *
- * Returns: 0 on success,
- * -2 if unable to start job because of timeout or
- * maxQueuedJobs limit,
- * -1 otherwise.
- */
-static int ATTRIBUTE_NONNULL(1)
-qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainJob job,
- qemuDomainAgentJob agentJob,
- qemuDomainAsyncJob asyncJob,
- bool nowait)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
- unsigned long long now;
- unsigned long long then;
- bool nested = job == QEMU_JOB_ASYNC_NESTED;
- bool async = job == QEMU_JOB_ASYNC;
- g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
- const char *blocker = NULL;
- const char *agentBlocker = NULL;
- int ret = -1;
- unsigned long long duration = 0;
- unsigned long long agentDuration = 0;
- unsigned long long asyncDuration = 0;
-
- VIR_DEBUG("Starting job: job=%s agentJob=%s asyncJob=%s "
- "(vm=%p name=%s, current job=%s agentJob=%s async=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
- obj, obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAgentJobTypeToString(priv->job.agentActive),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
-
- if (virTimeMillisNow(&now) < 0)
- return -1;
-
- priv->jobs_queued++;
- then = now + QEMU_JOB_WAIT_TIME;
-
- retry:
- if ((!async && job != QEMU_JOB_DESTROY) &&
- cfg->maxQueuedJobs &&
- priv->jobs_queued > cfg->maxQueuedJobs) {
- goto error;
- }
-
- while (!nested && !qemuDomainNestedJobAllowed(&priv->job, job)) {
- if (nowait)
- goto cleanup;
-
-        VIR_DEBUG("Waiting for async job (vm=%p name=%s)", obj, obj->def->name);
-        if (virCondWaitUntil(&priv->job.asyncCond, &obj->parent.lock, then) < 0)
- goto error;
- }
-
- while (!qemuDomainObjCanSetJob(&priv->job, job, agentJob)) {
- if (nowait)
- goto cleanup;
-
-        VIR_DEBUG("Waiting for job (vm=%p name=%s)", obj, obj->def->name);
-        if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
- goto error;
- }
-
- /* No job is active but a new async job could have been started while obj
- * was unlocked, so we need to recheck it. */
- if (!nested && !qemuDomainNestedJobAllowed(&priv->job, job))
- goto retry;
-
- ignore_value(virTimeMillisNow(&now));
-
- if (job) {
- qemuDomainObjResetJob(&priv->job);
-
- if (job != QEMU_JOB_ASYNC) {
- VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- obj, obj->def->name);
- priv->job.active = job;
- priv->job.owner = virThreadSelfID();
- priv->job.ownerAPI = virThreadJobGet();
- priv->job.started = now;
- } else {
- VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(asyncJob),
- obj, obj->def->name);
- qemuDomainObjResetAsyncJob(&priv->job);
- priv->job.current = g_new0(qemuDomainJobInfo, 1);
- priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
- priv->job.asyncJob = asyncJob;
- priv->job.asyncOwner = virThreadSelfID();
- priv->job.asyncOwnerAPI = virThreadJobGet();
- priv->job.asyncStarted = now;
- priv->job.current->started = now;
- }
- }
-
- if (agentJob) {
- qemuDomainObjResetAgentJob(&priv->job);
-
- VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
- obj, obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
- priv->job.agentActive = agentJob;
- priv->job.agentOwner = virThreadSelfID();
- priv->job.agentOwnerAPI = virThreadJobGet();
- priv->job.agentStarted = now;
- }
-
- if (qemuDomainTrackJob(job))
- qemuDomainObjSaveStatus(driver, obj);
-
- return 0;
-
- error:
- ignore_value(virTimeMillisNow(&now));
- if (priv->job.active && priv->job.started)
- duration = now - priv->job.started;
- if (priv->job.agentActive && priv->job.agentStarted)
- agentDuration = now - priv->job.agentStarted;
- if (priv->job.asyncJob && priv->job.asyncStarted)
- asyncDuration = now - priv->job.asyncStarted;
-
- VIR_WARN("Cannot start job (%s, %s, %s) for domain %s; "
- "current job is (%s, %s, %s) "
- "owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) "
- "for (%llus, %llus, %llus)",
- qemuDomainJobTypeToString(job),
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(asyncJob),
- obj->def->name,
- qemuDomainJobTypeToString(priv->job.active),
- qemuDomainAgentJobTypeToString(priv->job.agentActive),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- priv->job.owner, NULLSTR(priv->job.ownerAPI),
- priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI),
- priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
- priv->job.apiFlags,
- duration / 1000, agentDuration / 1000, asyncDuration / 1000);
-
- if (job) {
- if (nested || qemuDomainNestedJobAllowed(&priv->job, job))
- blocker = priv->job.ownerAPI;
- else
- blocker = priv->job.asyncOwnerAPI;
- }
-
- if (agentJob)
- agentBlocker = priv->job.agentOwnerAPI;
-
- if (errno == ETIMEDOUT) {
- if (blocker && agentBlocker) {
- virReportError(VIR_ERR_OPERATION_TIMEOUT,
- _("cannot acquire state change "
- "lock (held by monitor=%s agent=%s)"),
- blocker, agentBlocker);
- } else if (blocker) {
- virReportError(VIR_ERR_OPERATION_TIMEOUT,
- _("cannot acquire state change "
- "lock (held by monitor=%s)"),
- blocker);
- } else if (agentBlocker) {
- virReportError(VIR_ERR_OPERATION_TIMEOUT,
- _("cannot acquire state change "
- "lock (held by agent=%s)"),
- agentBlocker);
- } else {
- virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
- _("cannot acquire state change lock"));
- }
- ret = -2;
- } else if (cfg->maxQueuedJobs &&
- priv->jobs_queued > cfg->maxQueuedJobs) {
- if (blocker && agentBlocker) {
- virReportError(VIR_ERR_OPERATION_FAILED,
- _("cannot acquire state change "
- "lock (held by monitor=%s agent=%s) "
- "due to max_queued limit"),
- blocker, agentBlocker);
- } else if (blocker) {
- virReportError(VIR_ERR_OPERATION_FAILED,
- _("cannot acquire state change "
- "lock (held by monitor=%s) "
- "due to max_queued limit"),
- blocker);
- } else if (agentBlocker) {
- virReportError(VIR_ERR_OPERATION_FAILED,
- _("cannot acquire state change "
- "lock (held by agent=%s) "
- "due to max_queued limit"),
- agentBlocker);
- } else {
- virReportError(VIR_ERR_OPERATION_FAILED, "%s",
- _("cannot acquire state change lock "
- "due to max_queued limit"));
- }
- ret = -2;
- } else {
-        virReportSystemError(errno, "%s", _("cannot acquire job mutex"));
- }
-
- cleanup:
- priv->jobs_queued--;
- return ret;
-}
-
-/*
- * obj must be locked before calling
- *
- * This must be called by anything that will change the VM state
- * in any way, or anything that will use the QEMU monitor.
- *
- * Successful calls must be followed by EndJob eventually
- */
-int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainJob job)
-{
- if (qemuDomainObjBeginJobInternal(driver, obj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, false) < 0)
- return -1;
- else
- return 0;
-}
-
-/**
- * qemuDomainObjBeginAgentJob:
- *
- * Grabs agent type of job. Use if caller talks to guest agent only.
- *
- * To end job call qemuDomainObjEndAgentJob.
- */
-int
-qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainAgentJob agentJob)
-{
- return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE,
- agentJob,
- QEMU_ASYNC_JOB_NONE, false);
-}
-
-int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob,
- virDomainJobOperation operation,
- unsigned long apiFlags)
-{
- qemuDomainObjPrivatePtr priv;
-
- if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
- QEMU_AGENT_JOB_NONE,
- asyncJob, false) < 0)
- return -1;
-
- priv = obj->privateData;
- priv->job.current->operation = operation;
- priv->job.apiFlags = apiFlags;
- return 0;
-}
-
-int
-qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- if (asyncJob != priv->job.asyncJob) {
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("unexpected async job %d type expected %d"),
- asyncJob, priv->job.asyncJob);
- return -1;
- }
-
- if (priv->job.asyncOwner != virThreadSelfID()) {
-        VIR_WARN("This thread doesn't seem to be the async job owner: %llu",
- priv->job.asyncOwner);
- }
-
- return qemuDomainObjBeginJobInternal(driver, obj,
- QEMU_JOB_ASYNC_NESTED,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE,
- false);
-}
-
-/**
- * qemuDomainObjBeginJobNowait:
- *
- * @driver: qemu driver
- * @obj: domain object
- * @job: qemuDomainJob to start
- *
- * Acquires job for a domain object which must be locked before
- * calling. If there's already a job running it returns
- * immediately without any error reported.
- *
- * Returns: see qemuDomainObjBeginJobInternal
- */
-int
-qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainJob job)
-{
- return qemuDomainObjBeginJobInternal(driver, obj, job,
- QEMU_AGENT_JOB_NONE,
- QEMU_ASYNC_JOB_NONE, true);
-}
-
-/*
- * obj must be locked and have a reference before calling
- *
- * To be called after completing the work associated with the
- * earlier qemuDomainBeginJob() call
- */
-void
-qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
- qemuDomainJob job = priv->job.active;
-
- priv->jobs_queued--;
-
- VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
- qemuDomainJobTypeToString(job),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- obj, obj->def->name);
-
- qemuDomainObjResetJob(&priv->job);
- if (qemuDomainTrackJob(job))
- qemuDomainObjSaveStatus(driver, obj);
- /* We indeed need to wake up ALL threads waiting because
- * grabbing a job requires checking more variables. */
- virCondBroadcast(&priv->job.cond);
-}
-
-void
-qemuDomainObjEndAgentJob(virDomainObjPtr obj)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
- qemuDomainAgentJob agentJob = priv->job.agentActive;
-
- priv->jobs_queued--;
-
- VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
- qemuDomainAgentJobTypeToString(agentJob),
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- obj, obj->def->name);
-
- qemuDomainObjResetAgentJob(&priv->job);
- /* We indeed need to wake up ALL threads waiting because
- * grabbing a job requires checking more variables. */
- virCondBroadcast(&priv->job.cond);
-}
-
-void
-qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- priv->jobs_queued--;
-
- VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- obj, obj->def->name);
-
- qemuDomainObjResetAsyncJob(&priv->job);
- qemuDomainObjSaveStatus(driver, obj);
- virCondBroadcast(&priv->job.asyncCond);
-}
-
-void
-qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
-{
- qemuDomainObjPrivatePtr priv = obj->privateData;
-
- VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
- qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
- obj, obj->def->name);
-
- priv->job.abortJob = true;
- virDomainObjBroadcast(obj);
-}
-
/*
* obj must be locked before calling
*
@@ -7870,7 +6711,6 @@ qemuDomainRemoveInactiveLocked(virQEMUDriverPtr driver,
virDomainObjListRemoveLocked(driver->domains, vm);
}
-
/**
* qemuDomainRemoveInactiveJob:
*
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 19e80fef2b..15ffd87cb5 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -31,6 +31,7 @@
#include "qemu_monitor.h"
#include "qemu_agent.h"
#include "qemu_blockjob.h"
+#include "qemu_domainjob.h"
#include "qemu_conf.h"
#include "qemu_capabilities.h"
#include "qemu_migration_params.h"
@@ -54,182 +55,15 @@
# define QEMU_DOMAIN_MIG_BANDWIDTH_MAX (INT64_MAX / (1024 * 1024))
#endif
-#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
-#define QEMU_JOB_DEFAULT_MASK \
- (JOB_MASK(QEMU_JOB_QUERY) | \
- JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ABORT))
-
-/* Jobs which have to be tracked in domain state XML. */
-#define QEMU_DOMAIN_TRACK_JOBS \
- (JOB_MASK(QEMU_JOB_DESTROY) | \
- JOB_MASK(QEMU_JOB_ASYNC))
-
-/* Only 1 job is allowed at any time
- * A job includes *all* monitor commands, even those just querying
- * information, not merely actions */
-typedef enum {
- QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
- QEMU_JOB_QUERY, /* Doesn't change any state */
- QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
- QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
- QEMU_JOB_MODIFY, /* May change state */
- QEMU_JOB_ABORT, /* Abort current async job */
- QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
-
- /* The following two items must always be the last items before JOB_LAST */
- QEMU_JOB_ASYNC, /* Asynchronous job */
- QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
-
- QEMU_JOB_LAST
-} qemuDomainJob;
-VIR_ENUM_DECL(qemuDomainJob);
-
-typedef enum {
- QEMU_AGENT_JOB_NONE = 0, /* No agent job. */
- QEMU_AGENT_JOB_QUERY, /* Does not change state of domain */
- QEMU_AGENT_JOB_MODIFY, /* May change state of domain */
-
- QEMU_AGENT_JOB_LAST
-} qemuDomainAgentJob;
-VIR_ENUM_DECL(qemuDomainAgentJob);
-
-/* Async job consists of a series of jobs that may change state. Independent
- * jobs that do not change state (and possibly others if explicitly allowed by
- * current async job) are allowed to be run even if async job is active.
- */
-typedef enum {
- QEMU_ASYNC_JOB_NONE = 0,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- QEMU_ASYNC_JOB_SAVE,
- QEMU_ASYNC_JOB_DUMP,
- QEMU_ASYNC_JOB_SNAPSHOT,
- QEMU_ASYNC_JOB_START,
- QEMU_ASYNC_JOB_BACKUP,
-
- QEMU_ASYNC_JOB_LAST
-} qemuDomainAsyncJob;
-VIR_ENUM_DECL(qemuDomainAsyncJob);
-
-typedef enum {
- QEMU_DOMAIN_JOB_STATUS_NONE = 0,
- QEMU_DOMAIN_JOB_STATUS_ACTIVE,
- QEMU_DOMAIN_JOB_STATUS_MIGRATING,
- QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
- QEMU_DOMAIN_JOB_STATUS_PAUSED,
- QEMU_DOMAIN_JOB_STATUS_POSTCOPY,
- QEMU_DOMAIN_JOB_STATUS_COMPLETED,
- QEMU_DOMAIN_JOB_STATUS_FAILED,
- QEMU_DOMAIN_JOB_STATUS_CANCELED,
-} qemuDomainJobStatus;
-
-typedef enum {
- QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0,
- QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
- QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP,
- QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP,
- QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP,
-} qemuDomainJobStatsType;
-
-
-typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats;
-typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr;
-struct _qemuDomainMirrorStats {
- unsigned long long transferred;
- unsigned long long total;
-};
-
-typedef struct _qemuDomainBackupStats qemuDomainBackupStats;
-struct _qemuDomainBackupStats {
- unsigned long long transferred;
- unsigned long long total;
- unsigned long long tmp_used;
- unsigned long long tmp_total;
-};
-
-typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
-typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
-struct _qemuDomainJobInfo {
- qemuDomainJobStatus status;
- virDomainJobOperation operation;
- unsigned long long started; /* When the async job started */
- unsigned long long stopped; /* When the domain's CPUs were stopped */
- unsigned long long sent; /* When the source sent status info to the
- destination (only for migrations). */
- unsigned long long received; /* When the destination host received status
- info from the source (migrations only). */
- /* Computed values */
- unsigned long long timeElapsed;
- long long timeDelta; /* delta = received - sent, i.e., the difference
- between the source and the destination time plus
- the time between the end of Perform phase on the
- source and the beginning of Finish phase on the
- destination. */
- bool timeDeltaSet;
- /* Raw values from QEMU */
- qemuDomainJobStatsType statsType;
- union {
- qemuMonitorMigrationStats mig;
- qemuMonitorDumpStats dump;
- qemuDomainBackupStats backup;
- } stats;
- qemuDomainMirrorStats mirrorStats;
-
- char *errmsg; /* optional error message for failed completed jobs */
-};
-
-void
-qemuDomainJobInfoFree(qemuDomainJobInfoPtr info);
-
-G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree);
-
-qemuDomainJobInfoPtr
-qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info);
-
-typedef struct _qemuDomainJobObj qemuDomainJobObj;
-typedef qemuDomainJobObj *qemuDomainJobObjPtr;
-struct _qemuDomainJobObj {
- virCond cond; /* Use to coordinate jobs */
-
- /* The following members are for QEMU_JOB_* */
- qemuDomainJob active; /* Currently running job */
- unsigned long long owner; /* Thread id which set current job */
- const char *ownerAPI; /* The API which owns the job */
- unsigned long long started; /* When the current job started */
-
- /* The following members are for QEMU_AGENT_JOB_* */
- qemuDomainAgentJob agentActive; /* Currently running agent job */
- unsigned long long agentOwner; /* Thread id which set current agent job */
- const char *agentOwnerAPI; /* The API which owns the agent job */
- unsigned long long agentStarted; /* When the current agent job started */
-
- /* The following members are for QEMU_ASYNC_JOB_* */
- virCond asyncCond; /* Use to coordinate with async jobs */
- qemuDomainAsyncJob asyncJob; /* Currently active async job */
- unsigned long long asyncOwner; /* Thread which set current async job */
- const char *asyncOwnerAPI; /* The API which owns the async job */
- unsigned long long asyncStarted; /* When the current async job started */
- int phase; /* Job phase (mainly for migrations) */
- unsigned long long mask; /* Jobs allowed during async job */
- qemuDomainJobInfoPtr current; /* async job progress data */
- qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */
- bool abortJob; /* abort of the job requested */
- bool spiceMigration; /* we asked for spice migration and we
- * should wait for it to finish */
- bool spiceMigrated; /* spice migration completed */
- char *error; /* job event completion error */
- bool dumpCompleted; /* dump completed */
-
- qemuMigrationParamsPtr migParams;
- unsigned long apiFlags; /* flags passed to the API which started the async job */
-};
-
typedef void (*qemuDomainCleanupCallback)(virQEMUDriverPtr driver,
virDomainObjPtr vm);
#define QEMU_DOMAIN_MASTER_KEY_LEN 32 /* 32 bytes for 256 bit random key */
+void
+qemuDomainObjSaveStatus(virQEMUDriverPtr driver,
+ virDomainObjPtr obj);
+
void qemuDomainSaveStatus(virDomainObjPtr obj);
void qemuDomainSaveConfig(virDomainObjPtr obj);
@@ -660,56 +494,8 @@ virDomainObjPtr qemuDomainObjFromDomain(virDomainPtr domain);
qemuDomainSaveCookiePtr qemuDomainSaveCookieNew(virDomainObjPtr vm);
-const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
- int phase);
-int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
- const char *phase);
-
void qemuDomainEventFlush(int timer, void *opaque);
-void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
- virDomainObjPtr vm);
-
-int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainJob job)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainAgentJob agentJob)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob,
- virDomainJobOperation operation,
- unsigned long apiFlags)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainAsyncJob asyncJob)
- G_GNUC_WARN_UNUSED_RESULT;
-int qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- qemuDomainJob job)
- G_GNUC_WARN_UNUSED_RESULT;
-
-void qemuDomainObjEndJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj);
-void qemuDomainObjEndAgentJob(virDomainObjPtr obj);
-void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj);
-void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
-void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
- virDomainObjPtr obj,
- int phase);
-void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
- unsigned long long allowedJobs);
-void qemuDomainObjRestoreJob(virDomainObjPtr obj,
- qemuDomainJobObjPtr job);
-void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver,
- virDomainObjPtr obj);
-void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj);
-
qemuMonitorPtr qemuDomainGetMonitor(virDomainObjPtr vm)
ATTRIBUTE_NONNULL(1);
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
@@ -850,19 +636,10 @@ int qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
void qemuDomainRemoveInactive(virQEMUDriverPtr driver,
virDomainObjPtr vm);
-void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver,
- virDomainObjPtr vm);
-
-void qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver,
- virDomainObjPtr vm);
-
void qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
virDomainObjPtr vm,
bool value);
-bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs,
- qemuDomainJob newJob);
-
int qemuDomainCheckDiskStartupPolicy(virQEMUDriverPtr driver,
virDomainObjPtr vm,
size_t diskIndex,
@@ -964,20 +741,6 @@ bool qemuDomainCheckABIStability(virQEMUDriverPtr driver,
bool qemuDomainAgentAvailable(virDomainObjPtr vm,
bool reportError);
-int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
- ATTRIBUTE_NONNULL(1);
-int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
- ATTRIBUTE_NONNULL(1);
-int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
- virDomainJobInfoPtr info)
- ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
-int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
- int *type,
- virTypedParameterPtr *params,
- int *nparams)
- ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
- ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);
-
bool qemuDomainDiskBlockJobIsActive(virDomainDiskDefPtr disk);
bool qemuDomainHasBlockjob(virDomainObjPtr vm, bool copy_only)
ATTRIBUTE_NONNULL(1);
diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c
new file mode 100644
index 0000000000..7111acadda
--- /dev/null
+++ b/src/qemu/qemu_domainjob.c
@@ -0,0 +1,1192 @@
+/*
+ * qemu_domainjob.c: helper functions for QEMU domain jobs
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#include "qemu_domain.h"
+#include "qemu_migration.h"
+#include "qemu_domainjob.h"
+#include "viralloc.h"
+#include "virlog.h"
+#include "virerror.h"
+#include "virtime.h"
+#include "virthreadjob.h"
+
+#define VIR_FROM_THIS VIR_FROM_QEMU
+
+VIR_LOG_INIT("qemu.qemu_domainjob");
+
+VIR_ENUM_IMPL(qemuDomainJob,
+ QEMU_JOB_LAST,
+ "none",
+ "query",
+ "destroy",
+ "suspend",
+ "modify",
+ "abort",
+ "migration operation",
+ "none", /* async job is never stored in job.active */
+ "async nested",
+);
+
+VIR_ENUM_IMPL(qemuDomainAgentJob,
+ QEMU_AGENT_JOB_LAST,
+ "none",
+ "query",
+ "modify",
+);
+
+VIR_ENUM_IMPL(qemuDomainAsyncJob,
+ QEMU_ASYNC_JOB_LAST,
+ "none",
+ "migration out",
+ "migration in",
+ "save",
+ "dump",
+ "snapshot",
+ "start",
+ "backup",
+);
+
+VIR_ENUM_IMPL(qemuDomainNamespace,
+ QEMU_DOMAIN_NS_LAST,
+ "mount",
+);
+
+
+const char *
+qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+ int phase G_GNUC_UNUSED)
+{
+ switch (job) {
+ case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case QEMU_ASYNC_JOB_MIGRATION_IN:
+ return qemuMigrationJobPhaseTypeToString(phase);
+
+ case QEMU_ASYNC_JOB_SAVE:
+ case QEMU_ASYNC_JOB_DUMP:
+ case QEMU_ASYNC_JOB_SNAPSHOT:
+ case QEMU_ASYNC_JOB_START:
+ case QEMU_ASYNC_JOB_NONE:
+ case QEMU_ASYNC_JOB_BACKUP:
+ G_GNUC_FALLTHROUGH;
+ case QEMU_ASYNC_JOB_LAST:
+ break;
+ }
+
+ return "none";
+}
+
+int
+qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+ const char *phase)
+{
+ if (!phase)
+ return 0;
+
+ switch (job) {
+ case QEMU_ASYNC_JOB_MIGRATION_OUT:
+ case QEMU_ASYNC_JOB_MIGRATION_IN:
+ return qemuMigrationJobPhaseTypeFromString(phase);
+
+ case QEMU_ASYNC_JOB_SAVE:
+ case QEMU_ASYNC_JOB_DUMP:
+ case QEMU_ASYNC_JOB_SNAPSHOT:
+ case QEMU_ASYNC_JOB_START:
+ case QEMU_ASYNC_JOB_NONE:
+ case QEMU_ASYNC_JOB_BACKUP:
+ G_GNUC_FALLTHROUGH;
+ case QEMU_ASYNC_JOB_LAST:
+ break;
+ }
+
+ if (STREQ(phase, "none"))
+ return 0;
+ else
+ return -1;
+}
+
+
+void
+qemuDomainJobInfoFree(qemuDomainJobInfoPtr info)
+{
+ g_free(info->errmsg);
+ g_free(info);
+}
+
+
+qemuDomainJobInfoPtr
+qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info)
+{
+ qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1);
+
+ memcpy(ret, info, sizeof(*info));
+
+ ret->errmsg = g_strdup(info->errmsg);
+
+ return ret;
+}
+
+void
+qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ virObjectEventPtr event;
+ virTypedParameterPtr params = NULL;
+ int nparams = 0;
+ int type;
+
+ if (!priv->job.completed)
+ return;
+
+ if (qemuDomainJobInfoToParams(priv->job.completed, &type,
+ &params, &nparams) < 0) {
+ VIR_WARN("Could not get stats for completed job; domain %s",
+ vm->def->name);
+ }
+
+ event = virDomainEventJobCompletedNewFromObj(vm, params, nparams);
+ virObjectEventStateQueue(driver->domainEventState, event);
+}
+
+
+int
+qemuDomainObjInitJob(qemuDomainJobObjPtr job)
+{
+ memset(job, 0, sizeof(*job));
+
+ if (virCondInit(&job->cond) < 0)
+ return -1;
+
+ if (virCondInit(&job->asyncCond) < 0) {
+ virCondDestroy(&job->cond);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static void
+qemuDomainObjResetJob(qemuDomainJobObjPtr job)
+{
+ job->active = QEMU_JOB_NONE;
+ job->owner = 0;
+ job->ownerAPI = NULL;
+ job->started = 0;
+}
+
+
+static void
+qemuDomainObjResetAgentJob(qemuDomainJobObjPtr job)
+{
+ job->agentActive = QEMU_AGENT_JOB_NONE;
+ job->agentOwner = 0;
+ job->agentOwnerAPI = NULL;
+ job->agentStarted = 0;
+}
+
+
+static void
+qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job)
+{
+ job->asyncJob = QEMU_ASYNC_JOB_NONE;
+ job->asyncOwner = 0;
+ job->asyncOwnerAPI = NULL;
+ job->asyncStarted = 0;
+ job->phase = 0;
+ job->mask = QEMU_JOB_DEFAULT_MASK;
+ job->abortJob = false;
+ job->spiceMigration = false;
+ job->spiceMigrated = false;
+ job->dumpCompleted = false;
+ VIR_FREE(job->error);
+ g_clear_pointer(&job->current, qemuDomainJobInfoFree);
+ qemuMigrationParamsFree(job->migParams);
+ job->migParams = NULL;
+ job->apiFlags = 0;
+}
+
+void
+qemuDomainObjRestoreJob(virDomainObjPtr obj,
+ qemuDomainJobObjPtr job)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ memset(job, 0, sizeof(*job));
+ job->active = priv->job.active;
+ job->owner = priv->job.owner;
+ job->asyncJob = priv->job.asyncJob;
+ job->asyncOwner = priv->job.asyncOwner;
+ job->phase = priv->job.phase;
+ job->migParams = g_steal_pointer(&priv->job.migParams);
+ job->apiFlags = priv->job.apiFlags;
+
+ qemuDomainObjResetJob(&priv->job);
+ qemuDomainObjResetAsyncJob(&priv->job);
+}
+
+void
+qemuDomainObjFreeJob(qemuDomainJobObjPtr job)
+{
+ qemuDomainObjResetJob(job);
+ qemuDomainObjResetAsyncJob(job);
+ g_clear_pointer(&job->current, qemuDomainJobInfoFree);
+ g_clear_pointer(&job->completed, qemuDomainJobInfoFree);
+ virCondDestroy(&job->cond);
+ virCondDestroy(&job->asyncCond);
+}
+
+bool
+qemuDomainTrackJob(qemuDomainJob job)
+{
+ return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
+}
+
+
+int
+qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
+{
+ unsigned long long now;
+
+ if (!jobInfo->started)
+ return 0;
+
+ if (virTimeMillisNow(&now) < 0)
+ return -1;
+
+ if (now < jobInfo->started) {
+ VIR_WARN("Async job starts in the future");
+ jobInfo->started = 0;
+ return 0;
+ }
+
+ jobInfo->timeElapsed = now - jobInfo->started;
+ return 0;
+}
+
+int
+qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
+{
+ unsigned long long now;
+
+ if (!jobInfo->stopped)
+ return 0;
+
+ if (virTimeMillisNow(&now) < 0)
+ return -1;
+
+ if (now < jobInfo->stopped) {
+ VIR_WARN("Guest's CPUs stopped in the future");
+ jobInfo->stopped = 0;
+ return 0;
+ }
+
+ jobInfo->stats.mig.downtime = now - jobInfo->stopped;
+ jobInfo->stats.mig.downtime_set = true;
+ return 0;
+}
+
+static virDomainJobType
+qemuDomainJobStatusToType(qemuDomainJobStatus status)
+{
+ switch (status) {
+ case QEMU_DOMAIN_JOB_STATUS_NONE:
+ break;
+
+ case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
+ case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
+ case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
+ case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
+ case QEMU_DOMAIN_JOB_STATUS_PAUSED:
+ return VIR_DOMAIN_JOB_UNBOUNDED;
+
+ case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
+ return VIR_DOMAIN_JOB_COMPLETED;
+
+ case QEMU_DOMAIN_JOB_STATUS_FAILED:
+ return VIR_DOMAIN_JOB_FAILED;
+
+ case QEMU_DOMAIN_JOB_STATUS_CANCELED:
+ return VIR_DOMAIN_JOB_CANCELLED;
+ }
+
+ return VIR_DOMAIN_JOB_NONE;
+}
+
+int
+qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
+ virDomainJobInfoPtr info)
+{
+ info->type = qemuDomainJobStatusToType(jobInfo->status);
+ info->timeElapsed = jobInfo->timeElapsed;
+
+ switch (jobInfo->statsType) {
+ case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
+ info->memTotal = jobInfo->stats.mig.ram_total;
+ info->memRemaining = jobInfo->stats.mig.ram_remaining;
+ info->memProcessed = jobInfo->stats.mig.ram_transferred;
+ info->fileTotal = jobInfo->stats.mig.disk_total +
+ jobInfo->mirrorStats.total;
+ info->fileRemaining = jobInfo->stats.mig.disk_remaining +
+ (jobInfo->mirrorStats.total -
+ jobInfo->mirrorStats.transferred);
+ info->fileProcessed = jobInfo->stats.mig.disk_transferred +
+ jobInfo->mirrorStats.transferred;
+ break;
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
+ info->memTotal = jobInfo->stats.mig.ram_total;
+ info->memRemaining = jobInfo->stats.mig.ram_remaining;
+ info->memProcessed = jobInfo->stats.mig.ram_transferred;
+ break;
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
+ info->memTotal = jobInfo->stats.dump.total;
+ info->memProcessed = jobInfo->stats.dump.completed;
+ info->memRemaining = info->memTotal - info->memProcessed;
+ break;
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
+ info->fileTotal = jobInfo->stats.backup.total;
+ info->fileProcessed = jobInfo->stats.backup.transferred;
+ info->fileRemaining = info->fileTotal - info->fileProcessed;
+ break;
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
+ break;
+ }
+
+ info->dataTotal = info->memTotal + info->fileTotal;
+ info->dataRemaining = info->memRemaining + info->fileRemaining;
+ info->dataProcessed = info->memProcessed + info->fileProcessed;
+
+ return 0;
+}
+
+
+static int
+qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
+ int *type,
+ virTypedParameterPtr *params,
+ int *nparams)
+{
+ qemuMonitorMigrationStats *stats = &jobInfo->stats.mig;
+ qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats;
+ virTypedParameterPtr par = NULL;
+ int maxpar = 0;
+ int npar = 0;
+ unsigned long long mirrorRemaining = mirrorStats->total -
+ mirrorStats->transferred;
+
+ if (virTypedParamsAddInt(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_OPERATION,
+ jobInfo->operation) < 0)
+ goto error;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_TIME_ELAPSED,
+ jobInfo->timeElapsed) < 0)
+ goto error;
+
+ if (jobInfo->timeDeltaSet &&
+ jobInfo->timeElapsed > jobInfo->timeDelta &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_TIME_ELAPSED_NET,
+ jobInfo->timeElapsed - jobInfo->timeDelta) < 0)
+ goto error;
+
+ if (stats->downtime_set &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DOWNTIME,
+ stats->downtime) < 0)
+ goto error;
+
+ if (stats->downtime_set &&
+ jobInfo->timeDeltaSet &&
+ stats->downtime > jobInfo->timeDelta &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DOWNTIME_NET,
+ stats->downtime - jobInfo->timeDelta) < 0)
+ goto error;
+
+ if (stats->setup_time_set &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_SETUP_TIME,
+ stats->setup_time) < 0)
+ goto error;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DATA_TOTAL,
+ stats->ram_total +
+ stats->disk_total +
+ mirrorStats->total) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DATA_PROCESSED,
+ stats->ram_transferred +
+ stats->disk_transferred +
+ mirrorStats->transferred) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DATA_REMAINING,
+ stats->ram_remaining +
+ stats->disk_remaining +
+ mirrorRemaining) < 0)
+ goto error;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_TOTAL,
+ stats->ram_total) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_PROCESSED,
+ stats->ram_transferred) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_REMAINING,
+ stats->ram_remaining) < 0)
+ goto error;
+
+ if (stats->ram_bps &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_BPS,
+ stats->ram_bps) < 0)
+ goto error;
+
+ if (stats->ram_duplicate_set) {
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_CONSTANT,
+ stats->ram_duplicate) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_NORMAL,
+ stats->ram_normal) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES,
+ stats->ram_normal_bytes) < 0)
+ goto error;
+ }
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+ stats->ram_dirty_rate) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_ITERATION,
+ stats->ram_iteration) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_POSTCOPY_REQS,
+ stats->ram_postcopy_reqs) < 0)
+ goto error;
+
+ if (stats->ram_page_size > 0 &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_PAGE_SIZE,
+ stats->ram_page_size) < 0)
+ goto error;
+
+ /* The remaining stats are disk, mirror, or migration specific
+ * so if this is a SAVEDUMP, we can just skip them */
+ if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
+ goto done;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DISK_TOTAL,
+ stats->disk_total +
+ mirrorStats->total) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DISK_PROCESSED,
+ stats->disk_transferred +
+ mirrorStats->transferred) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DISK_REMAINING,
+ stats->disk_remaining +
+ mirrorRemaining) < 0)
+ goto error;
+
+ if (stats->disk_bps &&
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_DISK_BPS,
+ stats->disk_bps) < 0)
+ goto error;
+
+ if (stats->xbzrle_set) {
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_COMPRESSION_CACHE,
+ stats->xbzrle_cache_size) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_COMPRESSION_BYTES,
+ stats->xbzrle_bytes) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_COMPRESSION_PAGES,
+ stats->xbzrle_pages) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES,
+ stats->xbzrle_cache_miss) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW,
+ stats->xbzrle_overflow) < 0)
+ goto error;
+ }
+
+ if (stats->cpu_throttle_percentage &&
+ virTypedParamsAddInt(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE,
+ stats->cpu_throttle_percentage) < 0)
+ goto error;
+
+ done:
+ *type = qemuDomainJobStatusToType(jobInfo->status);
+ *params = par;
+ *nparams = npar;
+ return 0;
+
+ error:
+ virTypedParamsFree(par, npar);
+ return -1;
+}
+
+
+static int
+qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
+ int *type,
+ virTypedParameterPtr *params,
+ int *nparams)
+{
+ qemuMonitorDumpStats *stats = &jobInfo->stats.dump;
+ virTypedParameterPtr par = NULL;
+ int maxpar = 0;
+ int npar = 0;
+
+ if (virTypedParamsAddInt(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_OPERATION,
+ jobInfo->operation) < 0)
+ goto error;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_TIME_ELAPSED,
+ jobInfo->timeElapsed) < 0)
+ goto error;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_TOTAL,
+ stats->total) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_PROCESSED,
+ stats->completed) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_REMAINING,
+ stats->total - stats->completed) < 0)
+ goto error;
+
+ *type = qemuDomainJobStatusToType(jobInfo->status);
+ *params = par;
+ *nparams = npar;
+ return 0;
+
+ error:
+ virTypedParamsFree(par, npar);
+ return -1;
+}
+
+
+static int
+qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
+ int *type,
+ virTypedParameterPtr *params,
+ int *nparams)
+{
+ qemuDomainBackupStats *stats = &jobInfo->stats.backup;
+ g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1);
+
+ if (virTypedParamListAddInt(par, jobInfo->operation,
+ VIR_DOMAIN_JOB_OPERATION) < 0)
+ return -1;
+
+ if (virTypedParamListAddULLong(par, jobInfo->timeElapsed,
+ VIR_DOMAIN_JOB_TIME_ELAPSED) < 0)
+ return -1;
+
+ if (stats->transferred > 0 || stats->total > 0) {
+ if (virTypedParamListAddULLong(par, stats->total,
+ VIR_DOMAIN_JOB_DISK_TOTAL) < 0)
+ return -1;
+
+ if (virTypedParamListAddULLong(par, stats->transferred,
+ VIR_DOMAIN_JOB_DISK_PROCESSED) < 0)
+ return -1;
+
+ if (virTypedParamListAddULLong(par, stats->total - stats->transferred,
+ VIR_DOMAIN_JOB_DISK_REMAINING) < 0)
+ return -1;
+ }
+
+ if (stats->tmp_used > 0 || stats->tmp_total > 0) {
+ if (virTypedParamListAddULLong(par, stats->tmp_used,
+ VIR_DOMAIN_JOB_DISK_TEMP_USED) < 0)
+ return -1;
+
+ if (virTypedParamListAddULLong(par, stats->tmp_total,
+ VIR_DOMAIN_JOB_DISK_TEMP_TOTAL) < 0)
+ return -1;
+ }
+
+ if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
+ virTypedParamListAddBoolean(par,
+ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED,
+ VIR_DOMAIN_JOB_SUCCESS) < 0)
+ return -1;
+
+ if (jobInfo->errmsg &&
+ virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0)
+ return -1;
+
+ *nparams = virTypedParamListStealParams(par, params);
+ *type = qemuDomainJobStatusToType(jobInfo->status);
+ return 0;
+}
+
+
+int
+qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
+ int *type,
+ virTypedParameterPtr *params,
+ int *nparams)
+{
+ switch (jobInfo->statsType) {
+ case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
+ case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
+ return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams);
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
+ return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams);
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
+ return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams);
+
+ case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("invalid job statistics type"));
+ break;
+
+ default:
+ virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType);
+ break;
+ }
+
+ return -1;
+}
+
+
+void
+qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ int phase)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+ unsigned long long me = virThreadSelfID();
+
+ if (!priv->job.asyncJob)
+ return;
+
+ VIR_DEBUG("Setting '%s' phase to '%s'",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+
+ if (priv->job.asyncOwner && me != priv->job.asyncOwner) {
+ VIR_WARN("'%s' async job is owned by thread %llu",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ priv->job.asyncOwner);
+ }
+
+ priv->job.phase = phase;
+ priv->job.asyncOwner = me;
+ qemuDomainObjSaveStatus(driver, obj);
+}
+
+void
+qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
+ unsigned long long allowedJobs)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ if (!priv->job.asyncJob)
+ return;
+
+ priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
+}
+
+void
+qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
+ qemuDomainObjResetJob(&priv->job);
+ qemuDomainObjResetAsyncJob(&priv->job);
+ qemuDomainObjSaveStatus(driver, obj);
+}
+
+void
+qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ VIR_DEBUG("Releasing ownership of '%s' async job",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+
+ if (priv->job.asyncOwner != virThreadSelfID()) {
+ VIR_WARN("'%s' async job is owned by thread %llu",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ priv->job.asyncOwner);
+ }
+ priv->job.asyncOwner = 0;
+}
+
+static bool
+qemuDomainNestedJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob)
+{
+ return !jobs->asyncJob ||
+ newJob == QEMU_JOB_NONE ||
+ (jobs->mask & JOB_MASK(newJob)) != 0;
+}
+
+bool
+qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob)
+{
+ return !jobs->active && qemuDomainNestedJobAllowed(jobs, newJob);
+}
+
+static bool
+qemuDomainObjCanSetJob(qemuDomainJobObjPtr job,
+ qemuDomainJob newJob,
+ qemuDomainAgentJob newAgentJob)
+{
+ return ((newJob == QEMU_JOB_NONE ||
+ job->active == QEMU_JOB_NONE) &&
+ (newAgentJob == QEMU_AGENT_JOB_NONE ||
+ job->agentActive == QEMU_AGENT_JOB_NONE));
+}
+
+/* Give up waiting for mutex after 30 seconds */
+#define QEMU_JOB_WAIT_TIME (1000ull * 30)
+
+/**
+ * qemuDomainObjBeginJobInternal:
+ * @driver: qemu driver
+ * @obj: domain object
+ * @job: qemuDomainJob to start
+ * @asyncJob: qemuDomainAsyncJob to start
+ * @nowait: don't wait trying to acquire @job
+ *
+ * Acquires job for a domain object which must be locked before
+ * calling. If there's already a job running waits up to
+ * QEMU_JOB_WAIT_TIME after which the functions fails reporting
+ * an error unless @nowait is set.
+ *
+ * If @nowait is true this function tries to acquire job and if
+ * it fails, then it returns immediately without waiting. No
+ * error is reported in this case.
+ *
+ * Returns: 0 on success,
+ * -2 if unable to start job because of timeout or
+ * maxQueuedJobs limit,
+ * -1 otherwise.
+ */
+static int ATTRIBUTE_NONNULL(1)
+qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainJob job,
+ qemuDomainAgentJob agentJob,
+ qemuDomainAsyncJob asyncJob,
+ bool nowait)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+ unsigned long long now;
+ unsigned long long then;
+ bool nested = job == QEMU_JOB_ASYNC_NESTED;
+ bool async = job == QEMU_JOB_ASYNC;
+ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+ const char *blocker = NULL;
+ const char *agentBlocker = NULL;
+ int ret = -1;
+ unsigned long long duration = 0;
+ unsigned long long agentDuration = 0;
+ unsigned long long asyncDuration = 0;
+
+ VIR_DEBUG("Starting job: job=%s agentJob=%s asyncJob=%s "
+ "(vm=%p name=%s, current job=%s agentJob=%s async=%s)",
+ qemuDomainJobTypeToString(job),
+ qemuDomainAgentJobTypeToString(agentJob),
+ qemuDomainAsyncJobTypeToString(asyncJob),
+ obj, obj->def->name,
+ qemuDomainJobTypeToString(priv->job.active),
+ qemuDomainAgentJobTypeToString(priv->job.agentActive),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+
+ if (virTimeMillisNow(&now) < 0)
+ return -1;
+
+ priv->jobs_queued++;
+ then = now + QEMU_JOB_WAIT_TIME;
+
+ retry:
+ if ((!async && job != QEMU_JOB_DESTROY) &&
+ cfg->maxQueuedJobs &&
+ priv->jobs_queued > cfg->maxQueuedJobs) {
+ goto error;
+ }
+
+ while (!nested && !qemuDomainNestedJobAllowed(&priv->job, job)) {
+ if (nowait)
+ goto cleanup;
+
+        VIR_DEBUG("Waiting for async job (vm=%p name=%s)", obj, obj->def->name);
+        if (virCondWaitUntil(&priv->job.asyncCond, &obj->parent.lock, then) < 0)
+ goto error;
+ }
+
+ while (!qemuDomainObjCanSetJob(&priv->job, job, agentJob)) {
+ if (nowait)
+ goto cleanup;
+
+        VIR_DEBUG("Waiting for job (vm=%p name=%s)", obj, obj->def->name);
+        if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
+ goto error;
+ }
+
+ /* No job is active but a new async job could have been started while obj
+ * was unlocked, so we need to recheck it. */
+ if (!nested && !qemuDomainNestedJobAllowed(&priv->job, job))
+ goto retry;
+
+ ignore_value(virTimeMillisNow(&now));
+
+ if (job) {
+ qemuDomainObjResetJob(&priv->job);
+
+ if (job != QEMU_JOB_ASYNC) {
+ VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
+ qemuDomainJobTypeToString(job),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ obj, obj->def->name);
+ priv->job.active = job;
+ priv->job.owner = virThreadSelfID();
+ priv->job.ownerAPI = virThreadJobGet();
+ priv->job.started = now;
+ } else {
+ VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
+ qemuDomainAsyncJobTypeToString(asyncJob),
+ obj, obj->def->name);
+ qemuDomainObjResetAsyncJob(&priv->job);
+ priv->job.current = g_new0(qemuDomainJobInfo, 1);
+ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
+ priv->job.asyncJob = asyncJob;
+ priv->job.asyncOwner = virThreadSelfID();
+ priv->job.asyncOwnerAPI = virThreadJobGet();
+ priv->job.asyncStarted = now;
+ priv->job.current->started = now;
+ }
+ }
+
+ if (agentJob) {
+ qemuDomainObjResetAgentJob(&priv->job);
+
+ VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)",
+ qemuDomainAgentJobTypeToString(agentJob),
+ obj, obj->def->name,
+ qemuDomainJobTypeToString(priv->job.active),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+ priv->job.agentActive = agentJob;
+ priv->job.agentOwner = virThreadSelfID();
+ priv->job.agentOwnerAPI = virThreadJobGet();
+ priv->job.agentStarted = now;
+ }
+
+ if (qemuDomainTrackJob(job))
+ qemuDomainObjSaveStatus(driver, obj);
+
+ return 0;
+
+ error:
+ ignore_value(virTimeMillisNow(&now));
+ if (priv->job.active && priv->job.started)
+ duration = now - priv->job.started;
+ if (priv->job.agentActive && priv->job.agentStarted)
+ agentDuration = now - priv->job.agentStarted;
+ if (priv->job.asyncJob && priv->job.asyncStarted)
+ asyncDuration = now - priv->job.asyncStarted;
+
+ VIR_WARN("Cannot start job (%s, %s, %s) for domain %s; "
+ "current job is (%s, %s, %s) "
+ "owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) "
+ "for (%llus, %llus, %llus)",
+ qemuDomainJobTypeToString(job),
+ qemuDomainAgentJobTypeToString(agentJob),
+ qemuDomainAsyncJobTypeToString(asyncJob),
+ obj->def->name,
+ qemuDomainJobTypeToString(priv->job.active),
+ qemuDomainAgentJobTypeToString(priv->job.agentActive),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ priv->job.owner, NULLSTR(priv->job.ownerAPI),
+ priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI),
+ priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
+ priv->job.apiFlags,
+ duration / 1000, agentDuration / 1000, asyncDuration / 1000);
+
+ if (job) {
+ if (nested || qemuDomainNestedJobAllowed(&priv->job, job))
+ blocker = priv->job.ownerAPI;
+ else
+ blocker = priv->job.asyncOwnerAPI;
+ }
+
+ if (agentJob)
+ agentBlocker = priv->job.agentOwnerAPI;
+
+ if (errno == ETIMEDOUT) {
+ if (blocker && agentBlocker) {
+ virReportError(VIR_ERR_OPERATION_TIMEOUT,
+ _("cannot acquire state change "
+ "lock (held by monitor=%s agent=%s)"),
+ blocker, agentBlocker);
+ } else if (blocker) {
+ virReportError(VIR_ERR_OPERATION_TIMEOUT,
+ _("cannot acquire state change "
+ "lock (held by monitor=%s)"),
+ blocker);
+ } else if (agentBlocker) {
+ virReportError(VIR_ERR_OPERATION_TIMEOUT,
+ _("cannot acquire state change "
+ "lock (held by agent=%s)"),
+ agentBlocker);
+ } else {
+ virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
+ _("cannot acquire state change lock"));
+ }
+ ret = -2;
+ } else if (cfg->maxQueuedJobs &&
+ priv->jobs_queued > cfg->maxQueuedJobs) {
+ if (blocker && agentBlocker) {
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ _("cannot acquire state change "
+ "lock (held by monitor=%s agent=%s) "
+ "due to max_queued limit"),
+ blocker, agentBlocker);
+ } else if (blocker) {
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ _("cannot acquire state change "
+ "lock (held by monitor=%s) "
+ "due to max_queued limit"),
+ blocker);
+ } else if (agentBlocker) {
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ _("cannot acquire state change "
+ "lock (held by agent=%s) "
+ "due to max_queued limit"),
+ agentBlocker);
+ } else {
+ virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("cannot acquire state change lock "
+ "due to max_queued limit"));
+ }
+ ret = -2;
+ } else {
+        virReportSystemError(errno, "%s", _("cannot acquire job mutex"));
+ }
+
+ cleanup:
+ priv->jobs_queued--;
+ return ret;
+}
+
+/*
+ * obj must be locked before calling
+ *
+ * This must be called by anything that will change the VM state
+ * in any way, or anything that will use the QEMU monitor.
+ *
+ * Successful calls must be followed by EndJob eventually
+ */
+int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainJob job)
+{
+ if (qemuDomainObjBeginJobInternal(driver, obj, job,
+ QEMU_AGENT_JOB_NONE,
+ QEMU_ASYNC_JOB_NONE, false) < 0)
+ return -1;
+ else
+ return 0;
+}
+
+/**
+ * qemuDomainObjBeginAgentJob:
+ *
+ * Grabs agent type of job. Use if caller talks to guest agent only.
+ *
+ * To end job call qemuDomainObjEndAgentJob.
+ */
+int
+qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainAgentJob agentJob)
+{
+ return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE,
+ agentJob,
+ QEMU_ASYNC_JOB_NONE, false);
+}
+
+int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainAsyncJob asyncJob,
+ virDomainJobOperation operation,
+ unsigned long apiFlags)
+{
+ qemuDomainObjPrivatePtr priv;
+
+ if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
+ QEMU_AGENT_JOB_NONE,
+ asyncJob, false) < 0)
+ return -1;
+
+ priv = obj->privateData;
+ priv->job.current->operation = operation;
+ priv->job.apiFlags = apiFlags;
+ return 0;
+}
+
+int
+qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainAsyncJob asyncJob)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ if (asyncJob != priv->job.asyncJob) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("unexpected async job %d type expected %d"),
+ asyncJob, priv->job.asyncJob);
+ return -1;
+ }
+
+ if (priv->job.asyncOwner != virThreadSelfID()) {
+        VIR_WARN("This thread doesn't seem to be the async job owner: %llu",
+ priv->job.asyncOwner);
+ }
+
+ return qemuDomainObjBeginJobInternal(driver, obj,
+ QEMU_JOB_ASYNC_NESTED,
+ QEMU_AGENT_JOB_NONE,
+ QEMU_ASYNC_JOB_NONE,
+ false);
+}
+
+/**
+ * qemuDomainObjBeginJobNowait:
+ *
+ * @driver: qemu driver
+ * @obj: domain object
+ * @job: qemuDomainJob to start
+ *
+ * Acquires job for a domain object which must be locked before
+ * calling. If there's already a job running it returns
+ * immediately without any error reported.
+ *
+ * Returns: see qemuDomainObjBeginJobInternal
+ */
+int
+qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainJob job)
+{
+ return qemuDomainObjBeginJobInternal(driver, obj, job,
+ QEMU_AGENT_JOB_NONE,
+ QEMU_ASYNC_JOB_NONE, true);
+}
+
+/*
+ * obj must be locked and have a reference before calling
+ *
+ * To be called after completing the work associated with the
+ * earlier qemuDomainBeginJob() call
+ */
+void
+qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+ qemuDomainJob job = priv->job.active;
+
+ priv->jobs_queued--;
+
+ VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
+ qemuDomainJobTypeToString(job),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ obj, obj->def->name);
+
+ qemuDomainObjResetJob(&priv->job);
+ if (qemuDomainTrackJob(job))
+ qemuDomainObjSaveStatus(driver, obj);
+ /* We indeed need to wake up ALL threads waiting because
+ * grabbing a job requires checking more variables. */
+ virCondBroadcast(&priv->job.cond);
+}
+
+void
+qemuDomainObjEndAgentJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+ qemuDomainAgentJob agentJob = priv->job.agentActive;
+
+ priv->jobs_queued--;
+
+ VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
+ qemuDomainAgentJobTypeToString(agentJob),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ obj, obj->def->name);
+
+ qemuDomainObjResetAgentJob(&priv->job);
+ /* We indeed need to wake up ALL threads waiting because
+ * grabbing a job requires checking more variables. */
+ virCondBroadcast(&priv->job.cond);
+}
+
+void
+qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ priv->jobs_queued--;
+
+ VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ obj, obj->def->name);
+
+ qemuDomainObjResetAsyncJob(&priv->job);
+ qemuDomainObjSaveStatus(driver, obj);
+ virCondBroadcast(&priv->job.asyncCond);
+}
+
+void
+qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
+{
+ qemuDomainObjPrivatePtr priv = obj->privateData;
+
+ VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ obj, obj->def->name);
+
+ priv->job.abortJob = true;
+ virDomainObjBroadcast(obj);
+}
diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h
new file mode 100644
index 0000000000..124664354d
--- /dev/null
+++ b/src/qemu/qemu_domainjob.h
@@ -0,0 +1,269 @@
+/*
+ * qemu_domainjob.h: helper functions for QEMU domain jobs
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <glib-object.h>
+#include "qemu_migration_params.h"
+
+#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
+#define QEMU_JOB_DEFAULT_MASK \
+ (JOB_MASK(QEMU_JOB_QUERY) | \
+ JOB_MASK(QEMU_JOB_DESTROY) | \
+ JOB_MASK(QEMU_JOB_ABORT))
+
+/* Jobs which have to be tracked in domain state XML. */
+#define QEMU_DOMAIN_TRACK_JOBS \
+ (JOB_MASK(QEMU_JOB_DESTROY) | \
+ JOB_MASK(QEMU_JOB_ASYNC))
+
+/* Only 1 job is allowed at any time
+ * A job includes *all* monitor commands, even those just querying
+ * information, not merely actions */
+typedef enum {
+ QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
+ QEMU_JOB_QUERY, /* Doesn't change any state */
+ QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
+ QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
+ QEMU_JOB_MODIFY, /* May change state */
+ QEMU_JOB_ABORT, /* Abort current async job */
+ QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
+
+ /* The following two items must always be the last items before JOB_LAST */
+ QEMU_JOB_ASYNC, /* Asynchronous job */
+ QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
+
+ QEMU_JOB_LAST
+} qemuDomainJob;
+VIR_ENUM_DECL(qemuDomainJob);
+
+typedef enum {
+ QEMU_AGENT_JOB_NONE = 0, /* No agent job. */
+ QEMU_AGENT_JOB_QUERY, /* Does not change state of domain */
+ QEMU_AGENT_JOB_MODIFY, /* May change state of domain */
+
+ QEMU_AGENT_JOB_LAST
+} qemuDomainAgentJob;
+VIR_ENUM_DECL(qemuDomainAgentJob);
+
+/* Async job consists of a series of jobs that may change state. Independent
+ * jobs that do not change state (and possibly others if explicitly allowed by
+ * current async job) are allowed to be run even if async job is active.
+ */
+typedef enum {
+ QEMU_ASYNC_JOB_NONE = 0,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ QEMU_ASYNC_JOB_SAVE,
+ QEMU_ASYNC_JOB_DUMP,
+ QEMU_ASYNC_JOB_SNAPSHOT,
+ QEMU_ASYNC_JOB_START,
+ QEMU_ASYNC_JOB_BACKUP,
+
+ QEMU_ASYNC_JOB_LAST
+} qemuDomainAsyncJob;
+VIR_ENUM_DECL(qemuDomainAsyncJob);
+
+typedef enum {
+ QEMU_DOMAIN_JOB_STATUS_NONE = 0,
+ QEMU_DOMAIN_JOB_STATUS_ACTIVE,
+ QEMU_DOMAIN_JOB_STATUS_MIGRATING,
+ QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
+ QEMU_DOMAIN_JOB_STATUS_PAUSED,
+ QEMU_DOMAIN_JOB_STATUS_POSTCOPY,
+ QEMU_DOMAIN_JOB_STATUS_COMPLETED,
+ QEMU_DOMAIN_JOB_STATUS_FAILED,
+ QEMU_DOMAIN_JOB_STATUS_CANCELED,
+} qemuDomainJobStatus;
+
+typedef enum {
+ QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0,
+ QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
+ QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP,
+ QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP,
+ QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP,
+} qemuDomainJobStatsType;
+
+
+typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats;
+typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr;
+struct _qemuDomainMirrorStats {
+ unsigned long long transferred;
+ unsigned long long total;
+};
+
+typedef struct _qemuDomainBackupStats qemuDomainBackupStats;
+struct _qemuDomainBackupStats {
+ unsigned long long transferred;
+ unsigned long long total;
+ unsigned long long tmp_used;
+ unsigned long long tmp_total;
+};
+
+typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
+typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
+struct _qemuDomainJobInfo {
+ qemuDomainJobStatus status;
+ virDomainJobOperation operation;
+ unsigned long long started; /* When the async job started */
+ unsigned long long stopped; /* When the domain's CPUs were stopped */
+ unsigned long long sent; /* When the source sent status info to the
+ destination (only for migrations). */
+ unsigned long long received; /* When the destination host received status
+ info from the source (migrations only). */
+ /* Computed values */
+ unsigned long long timeElapsed;
+ long long timeDelta; /* delta = received - sent, i.e., the difference
+ between the source and the destination time plus
+ the time between the end of Perform phase on the
+ source and the beginning of Finish phase on the
+ destination. */
+ bool timeDeltaSet;
+ /* Raw values from QEMU */
+ qemuDomainJobStatsType statsType;
+ union {
+ qemuMonitorMigrationStats mig;
+ qemuMonitorDumpStats dump;
+ qemuDomainBackupStats backup;
+ } stats;
+ qemuDomainMirrorStats mirrorStats;
+
+ char *errmsg; /* optional error message for failed completed jobs */
+};
+
+void
+qemuDomainJobInfoFree(qemuDomainJobInfoPtr info);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree);
+
+qemuDomainJobInfoPtr
+qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info);
+
+typedef struct _qemuDomainJobObj qemuDomainJobObj;
+typedef qemuDomainJobObj *qemuDomainJobObjPtr;
+struct _qemuDomainJobObj {
+ virCond cond; /* Use to coordinate jobs */
+
+ /* The following members are for QEMU_JOB_* */
+ qemuDomainJob active; /* Currently running job */
+ unsigned long long owner; /* Thread id which set current job */
+ const char *ownerAPI; /* The API which owns the job */
+ unsigned long long started; /* When the current job started */
+
+ /* The following members are for QEMU_AGENT_JOB_* */
+ qemuDomainAgentJob agentActive; /* Currently running agent job */
+ unsigned long long agentOwner; /* Thread id which set current agent job */
+ const char *agentOwnerAPI; /* The API which owns the agent job */
+ unsigned long long agentStarted; /* When the current agent job started */
+
+ /* The following members are for QEMU_ASYNC_JOB_* */
+ virCond asyncCond; /* Use to coordinate with async jobs */
+ qemuDomainAsyncJob asyncJob; /* Currently active async job */
+ unsigned long long asyncOwner; /* Thread which set current async job */
+ const char *asyncOwnerAPI; /* The API which owns the async job */
+ unsigned long long asyncStarted; /* When the current async job started */
+ int phase; /* Job phase (mainly for migrations) */
+ unsigned long long mask; /* Jobs allowed during async job */
+ qemuDomainJobInfoPtr current; /* async job progress data */
+    qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */
+ bool abortJob; /* abort of the job requested */
+ bool spiceMigration; /* we asked for spice migration and we
+ * should wait for it to finish */
+ bool spiceMigrated; /* spice migration completed */
+ char *error; /* job event completion error */
+ bool dumpCompleted; /* dump completed */
+
+ qemuMigrationParamsPtr migParams;
+ unsigned long apiFlags; /* flags passed to the API which started the async job */
+};
+
+const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
+ int phase);
+int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
+ const char *phase);
+
+void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
+ virDomainObjPtr vm);
+
+int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainJob job)
+ G_GNUC_WARN_UNUSED_RESULT;
+int qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainAgentJob agentJob)
+ G_GNUC_WARN_UNUSED_RESULT;
+int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainAsyncJob asyncJob,
+ virDomainJobOperation operation,
+ unsigned long apiFlags)
+ G_GNUC_WARN_UNUSED_RESULT;
+int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainAsyncJob asyncJob)
+ G_GNUC_WARN_UNUSED_RESULT;
+int qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ qemuDomainJob job)
+ G_GNUC_WARN_UNUSED_RESULT;
+
+void qemuDomainObjEndJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj);
+void qemuDomainObjEndAgentJob(virDomainObjPtr obj);
+void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj);
+void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
+void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
+ virDomainObjPtr obj,
+ int phase);
+void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
+ unsigned long long allowedJobs);
+void qemuDomainObjRestoreJob(virDomainObjPtr obj,
+ qemuDomainJobObjPtr job);
+void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver,
+ virDomainObjPtr obj);
+void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj);
+
+void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver,
+ virDomainObjPtr vm);
+
+void qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver,
+ virDomainObjPtr vm);
+
+int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
+ ATTRIBUTE_NONNULL(1);
+int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
+ ATTRIBUTE_NONNULL(1);
+int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
+ virDomainJobInfoPtr info)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
+ int *type,
+ virTypedParameterPtr *params,
+ int *nparams)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
+ ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);
+
+bool qemuDomainTrackJob(qemuDomainJob job);
+
+void qemuDomainObjFreeJob(qemuDomainJobObjPtr job);
+
+int qemuDomainObjInitJob(qemuDomainJobObjPtr job);
+
+bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob);
--
2.17.1