[GSoC][PATCH v4 0/8] creating hypervisor-agnostic domainjob

This series deals with removal of external dependencies of `qemu_domainjob`, followed by it being moved to `hypervisor/virdomainjob`, and renaming corresponding structures and functions for creating the hypervisor-agnostic domain-jobs. Previous series can be seen here[1]. The previous series was updated because: 1. The patch series was rebased on master, after the recent merge of `qemu_snapshot`, so that while checking it out, we can avoid conflicts. 2. Patch "qemu_domainjob: removed reference to `qemuDomainObjPrivatePtr`" was updated. 3. Patch "virdomainjob: moved `qemu_domainjob` to `hypervisor/virdomainjob`" was added to complete the series. This series is based on this patch (virmigration: `qemuMigrationJobPhase` transformed for more generic use)[2]. Please check that out first and then base this patch series over it. You can see all these changes on my github account[3]. [1]: https://www.redhat.com/archives/libvir-list/2020-August/msg00744.html [2]: https://www.redhat.com/archives/libvir-list/2020-August/msg00966.html [3]: https://github.com/pratham-pc/libvirt/tree/cms Prathamesh Chavan (8): qemu_domainjob: `qemuDomainJobInfo` code moved to `qemu_domain` qemu_domain: Added `qemuDomainJobInfo` to domainJob's `privateData` qemu_domainjob: callback functions added to access `jobs_queued` qemu_domainjob: callback function added to access `maxQueuedJobs` qemu_domainjob: `qemuDomainJobPrivateJobCallbacks` structure nested qemu_domainjob: add `saveDomainStatus` as a callback function to jobs qemu_domainjob: removed reference to `qemuDomainObjPrivatePtr` virdomainjob: moved `qemu_domainjob` to `hypervisor/virdomainjob` po/POTFILES.in | 2 +- po/libvirt.pot | 34 +- src/hypervisor/meson.build | 1 + src/hypervisor/virdomainjob.c | 792 ++++++++++++++++++ src/hypervisor/virdomainjob.h | 243 ++++++ src/libvirt_private.syms | 28 + src/qemu/meson.build | 1 - src/qemu/qemu_backup.c | 92 +-- src/qemu/qemu_backup.h | 5 +- src/qemu/qemu_block.c | 65 +- src/qemu/qemu_block.h | 
18 +- src/qemu/qemu_blockjob.c | 77 +- src/qemu/qemu_blockjob.h | 3 +- src/qemu/qemu_checkpoint.c | 39 +- src/qemu/qemu_domain.c | 657 +++++++++++++-- src/qemu/qemu_domain.h | 106 ++- src/qemu/qemu_domainjob.c | 1291 ------------------------------ src/qemu/qemu_domainjob.h | 291 ------- src/qemu/qemu_driver.c | 1186 ++++++++++++++------------- src/qemu/qemu_hotplug.c | 335 ++++---- src/qemu/qemu_hotplug.h | 38 +- src/qemu/qemu_migration.c | 594 +++++++------- src/qemu/qemu_migration.h | 20 +- src/qemu/qemu_migration_cookie.c | 17 +- src/qemu/qemu_migration_params.c | 52 +- src/qemu/qemu_migration_params.h | 15 +- src/qemu/qemu_process.c | 462 ++++++----- src/qemu/qemu_process.h | 37 +- src/qemu/qemu_saveimage.c | 4 +- src/qemu/qemu_saveimage.h | 6 +- src/qemu/qemu_snapshot.c | 111 ++- tests/qemuhotplugtest.c | 2 +- 32 files changed, 3323 insertions(+), 3301 deletions(-) create mode 100644 src/hypervisor/virdomainjob.c create mode 100644 src/hypervisor/virdomainjob.h delete mode 100644 src/qemu/qemu_domainjob.c delete mode 100644 src/qemu/qemu_domainjob.h -- 2.25.1

As `qemuDomainJobInfo` structure is highly specific for the qemu-hypervisor, and since we're planning to keep only hypervisor-agnostic structures, we move functions related `qemuDomainJobInfo` to `qemu_domain`. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_domain.c | 473 +++++++++++++++++++++++++++++++++++++ src/qemu/qemu_domain.h | 24 ++ src/qemu/qemu_domainjob.c | 477 -------------------------------------- src/qemu/qemu_domainjob.h | 17 -- 4 files changed, 497 insertions(+), 494 deletions(-) diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 21f24fceed..4bee56a1be 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -76,6 +76,457 @@ VIR_LOG_INIT("qemu.qemu_domain"); +static virDomainJobType +qemuDomainJobStatusToType(qemuDomainJobStatus status) +{ + switch (status) { + case QEMU_DOMAIN_JOB_STATUS_NONE: + break; + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: + case QEMU_DOMAIN_JOB_STATUS_MIGRATING: + case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + case QEMU_DOMAIN_JOB_STATUS_PAUSED: + return VIR_DOMAIN_JOB_UNBOUNDED; + + case QEMU_DOMAIN_JOB_STATUS_COMPLETED: + return VIR_DOMAIN_JOB_COMPLETED; + + case QEMU_DOMAIN_JOB_STATUS_FAILED: + return VIR_DOMAIN_JOB_FAILED; + + case QEMU_DOMAIN_JOB_STATUS_CANCELED: + return VIR_DOMAIN_JOB_CANCELLED; + } + + return VIR_DOMAIN_JOB_NONE; +} + +int +qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) +{ + unsigned long long now; + + if (!jobInfo->started) + return 0; + + if (virTimeMillisNow(&now) < 0) + return -1; + + if (now < jobInfo->started) { + VIR_WARN("Async job starts in the future"); + jobInfo->started = 0; + return 0; + } + + jobInfo->timeElapsed = now - jobInfo->started; + return 0; +} + +int +qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) +{ + unsigned long long now; + + if (!jobInfo->stopped) + return 0; + + if (virTimeMillisNow(&now) < 0) + return -1; + + if (now < jobInfo->stopped) { + VIR_WARN("Guest's 
CPUs stopped in the future"); + jobInfo->stopped = 0; + return 0; + } + + jobInfo->stats.mig.downtime = now - jobInfo->stopped; + jobInfo->stats.mig.downtime_set = true; + return 0; +} + + +int +qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, + virDomainJobInfoPtr info) +{ + info->type = qemuDomainJobStatusToType(jobInfo->status); + info->timeElapsed = jobInfo->timeElapsed; + + switch (jobInfo->statsType) { + case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: + info->memTotal = jobInfo->stats.mig.ram_total; + info->memRemaining = jobInfo->stats.mig.ram_remaining; + info->memProcessed = jobInfo->stats.mig.ram_transferred; + info->fileTotal = jobInfo->stats.mig.disk_total + + jobInfo->mirrorStats.total; + info->fileRemaining = jobInfo->stats.mig.disk_remaining + + (jobInfo->mirrorStats.total - + jobInfo->mirrorStats.transferred); + info->fileProcessed = jobInfo->stats.mig.disk_transferred + + jobInfo->mirrorStats.transferred; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + info->memTotal = jobInfo->stats.mig.ram_total; + info->memRemaining = jobInfo->stats.mig.ram_remaining; + info->memProcessed = jobInfo->stats.mig.ram_transferred; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + info->memTotal = jobInfo->stats.dump.total; + info->memProcessed = jobInfo->stats.dump.completed; + info->memRemaining = info->memTotal - info->memProcessed; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + info->fileTotal = jobInfo->stats.backup.total; + info->fileProcessed = jobInfo->stats.backup.transferred; + info->fileRemaining = info->fileTotal - info->fileProcessed; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + break; + } + + info->dataTotal = info->memTotal + info->fileTotal; + info->dataRemaining = info->memRemaining + info->fileRemaining; + info->dataProcessed = info->memProcessed + info->fileProcessed; + + return 0; +} + + +static int +qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int 
*nparams) +{ + qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; + qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats; + virTypedParameterPtr par = NULL; + int maxpar = 0; + int npar = 0; + unsigned long long mirrorRemaining = mirrorStats->total - + mirrorStats->transferred; + + if (virTypedParamsAddInt(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_OPERATION, + jobInfo->operation) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_TIME_ELAPSED, + jobInfo->timeElapsed) < 0) + goto error; + + if (jobInfo->timeDeltaSet && + jobInfo->timeElapsed > jobInfo->timeDelta && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_TIME_ELAPSED_NET, + jobInfo->timeElapsed - jobInfo->timeDelta) < 0) + goto error; + + if (stats->downtime_set && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DOWNTIME, + stats->downtime) < 0) + goto error; + + if (stats->downtime_set && + jobInfo->timeDeltaSet && + stats->downtime > jobInfo->timeDelta && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DOWNTIME_NET, + stats->downtime - jobInfo->timeDelta) < 0) + goto error; + + if (stats->setup_time_set && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_SETUP_TIME, + stats->setup_time) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DATA_TOTAL, + stats->ram_total + + stats->disk_total + + mirrorStats->total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DATA_PROCESSED, + stats->ram_transferred + + stats->disk_transferred + + mirrorStats->transferred) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DATA_REMAINING, + stats->ram_remaining + + stats->disk_remaining + + mirrorRemaining) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_TOTAL, + stats->ram_total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_PROCESSED, + 
stats->ram_transferred) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_REMAINING, + stats->ram_remaining) < 0) + goto error; + + if (stats->ram_bps && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_BPS, + stats->ram_bps) < 0) + goto error; + + if (stats->ram_duplicate_set) { + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_CONSTANT, + stats->ram_duplicate) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_NORMAL, + stats->ram_normal) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES, + stats->ram_normal_bytes) < 0) + goto error; + } + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, + stats->ram_dirty_rate) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_ITERATION, + stats->ram_iteration) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_POSTCOPY_REQS, + stats->ram_postcopy_reqs) < 0) + goto error; + + if (stats->ram_page_size > 0 && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_PAGE_SIZE, + stats->ram_page_size) < 0) + goto error; + + /* The remaining stats are disk, mirror, or migration specific + * so if this is a SAVEDUMP, we can just skip them */ + if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) + goto done; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_TOTAL, + stats->disk_total + + mirrorStats->total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_PROCESSED, + stats->disk_transferred + + mirrorStats->transferred) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_REMAINING, + stats->disk_remaining + + mirrorRemaining) < 0) + goto error; + + if (stats->disk_bps && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_BPS, + stats->disk_bps) < 0) + goto error; + + if 
(stats->xbzrle_set) { + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_CACHE, + stats->xbzrle_cache_size) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_BYTES, + stats->xbzrle_bytes) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_PAGES, + stats->xbzrle_pages) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES, + stats->xbzrle_cache_miss) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW, + stats->xbzrle_overflow) < 0) + goto error; + } + + if (stats->cpu_throttle_percentage && + virTypedParamsAddInt(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE, + stats->cpu_throttle_percentage) < 0) + goto error; + + done: + *type = qemuDomainJobStatusToType(jobInfo->status); + *params = par; + *nparams = npar; + return 0; + + error: + virTypedParamsFree(par, npar); + return -1; +} + + +static int +qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + qemuMonitorDumpStats *stats = &jobInfo->stats.dump; + virTypedParameterPtr par = NULL; + int maxpar = 0; + int npar = 0; + + if (virTypedParamsAddInt(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_OPERATION, + jobInfo->operation) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_TIME_ELAPSED, + jobInfo->timeElapsed) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_TOTAL, + stats->total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_PROCESSED, + stats->completed) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_REMAINING, + stats->total - stats->completed) < 0) + goto error; + + *type = qemuDomainJobStatusToType(jobInfo->status); + *params = par; + *nparams = npar; + return 0; + + error: + virTypedParamsFree(par, 
npar); + return -1; +} + + +static int +qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + qemuDomainBackupStats *stats = &jobInfo->stats.backup; + g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); + + if (virTypedParamListAddInt(par, jobInfo->operation, + VIR_DOMAIN_JOB_OPERATION) < 0) + return -1; + + if (virTypedParamListAddULLong(par, jobInfo->timeElapsed, + VIR_DOMAIN_JOB_TIME_ELAPSED) < 0) + return -1; + + if (stats->transferred > 0 || stats->total > 0) { + if (virTypedParamListAddULLong(par, stats->total, + VIR_DOMAIN_JOB_DISK_TOTAL) < 0) + return -1; + + if (virTypedParamListAddULLong(par, stats->transferred, + VIR_DOMAIN_JOB_DISK_PROCESSED) < 0) + return -1; + + if (virTypedParamListAddULLong(par, stats->total - stats->transferred, + VIR_DOMAIN_JOB_DISK_REMAINING) < 0) + return -1; + } + + if (stats->tmp_used > 0 || stats->tmp_total > 0) { + if (virTypedParamListAddULLong(par, stats->tmp_used, + VIR_DOMAIN_JOB_DISK_TEMP_USED) < 0) + return -1; + + if (virTypedParamListAddULLong(par, stats->tmp_total, + VIR_DOMAIN_JOB_DISK_TEMP_TOTAL) < 0) + return -1; + } + + if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && + virTypedParamListAddBoolean(par, + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, + VIR_DOMAIN_JOB_SUCCESS) < 0) + return -1; + + if (jobInfo->errmsg && + virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) + return -1; + + *nparams = virTypedParamListStealParams(par, params); + *type = qemuDomainJobStatusToType(jobInfo->status); + return 0; +} + + +int +qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + switch (jobInfo->statsType) { + case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: + case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); + + case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + return 
qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams); + + case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); + + case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("invalid job statistics type")); + break; + + default: + virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); + break; + } + + return -1; +} + + +void +qemuDomainJobInfoFree(qemuDomainJobInfoPtr info) +{ + g_free(info->errmsg); + g_free(info); +} + + +qemuDomainJobInfoPtr +qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info) +{ + qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1); + + memcpy(ret, info, sizeof(*info)); + + ret->errmsg = g_strdup(info->errmsg); + + return ret; +} + + static void * qemuJobAllocPrivate(void) { @@ -179,6 +630,28 @@ qemuDomainFormatJobPrivate(virBufferPtr buf, return 0; } +void +qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, + virDomainObjPtr vm) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + virObjectEventPtr event; + virTypedParameterPtr params = NULL; + int nparams = 0; + int type; + + if (!priv->job.completed) + return; + + if (qemuDomainJobInfoToParams(priv->job.completed, &type, + ¶ms, &nparams) < 0) { + VIR_WARN("Could not get stats for completed job; domain %s", + vm->def->name); + } + + event = virDomainEventJobCompletedNewFromObj(vm, params, nparams); + virObjectEventStateQueue(driver->domainEventState, event); +} static int qemuDomainObjPrivateXMLParseJobNBDSource(xmlNodePtr node, diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index adba79aded..73085b1d91 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -493,6 +493,30 @@ struct _qemuDomainJobPrivate { qemuMigrationParamsPtr migParams; }; + +void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, + virDomainObjPtr vm); + +void +qemuDomainJobInfoFree(qemuDomainJobInfoPtr info); + +qemuDomainJobInfoPtr 
+qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info); + +int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) + ATTRIBUTE_NONNULL(1); +int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) + ATTRIBUTE_NONNULL(1); +int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, + virDomainJobInfoPtr info) + ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); +int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) + ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) + ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4); + int qemuDomainObjStartWorker(virDomainObjPtr dom); void qemuDomainObjStopWorker(virDomainObjPtr dom); diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 02f85e1b13..8f8daf0fcb 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -115,51 +115,6 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, return -1; } - -void -qemuDomainJobInfoFree(qemuDomainJobInfoPtr info) -{ - g_free(info->errmsg); - g_free(info); -} - - -qemuDomainJobInfoPtr -qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info) -{ - qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1); - - memcpy(ret, info, sizeof(*info)); - - ret->errmsg = g_strdup(info->errmsg); - - return ret; -} - -void -qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, - virDomainObjPtr vm) -{ - qemuDomainObjPrivatePtr priv = vm->privateData; - virObjectEventPtr event; - virTypedParameterPtr params = NULL; - int nparams = 0; - int type; - - if (!priv->job.completed) - return; - - if (qemuDomainJobInfoToParams(priv->job.completed, &type, - ¶ms, &nparams) < 0) { - VIR_WARN("Could not get stats for completed job; domain %s", - vm->def->name); - } - - event = virDomainEventJobCompletedNewFromObj(vm, params, nparams); - virObjectEventStateQueue(driver->domainEventState, event); -} - - int qemuDomainObjInitJob(qemuDomainJobObjPtr job, qemuDomainObjPrivateJobCallbacksPtr cb) @@ -216,7 +171,6 @@ 
qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) job->mask = QEMU_JOB_DEFAULT_MASK; job->abortJob = false; VIR_FREE(job->error); - g_clear_pointer(&job->current, qemuDomainJobInfoFree); job->cb->resetJobPrivate(job->privateData); job->apiFlags = 0; } @@ -251,8 +205,6 @@ qemuDomainObjFreeJob(qemuDomainJobObjPtr job) qemuDomainObjResetJob(job); qemuDomainObjResetAsyncJob(job); job->cb->freeJobPrivate(job->privateData); - g_clear_pointer(&job->current, qemuDomainJobInfoFree); - g_clear_pointer(&job->completed, qemuDomainJobInfoFree); virCondDestroy(&job->cond); virCondDestroy(&job->asyncCond); } @@ -264,435 +216,6 @@ qemuDomainTrackJob(qemuDomainJob job) } -int -qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) -{ - unsigned long long now; - - if (!jobInfo->started) - return 0; - - if (virTimeMillisNow(&now) < 0) - return -1; - - if (now < jobInfo->started) { - VIR_WARN("Async job starts in the future"); - jobInfo->started = 0; - return 0; - } - - jobInfo->timeElapsed = now - jobInfo->started; - return 0; -} - -int -qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) -{ - unsigned long long now; - - if (!jobInfo->stopped) - return 0; - - if (virTimeMillisNow(&now) < 0) - return -1; - - if (now < jobInfo->stopped) { - VIR_WARN("Guest's CPUs stopped in the future"); - jobInfo->stopped = 0; - return 0; - } - - jobInfo->stats.mig.downtime = now - jobInfo->stopped; - jobInfo->stats.mig.downtime_set = true; - return 0; -} - -static virDomainJobType -qemuDomainJobStatusToType(qemuDomainJobStatus status) -{ - switch (status) { - case QEMU_DOMAIN_JOB_STATUS_NONE: - break; - - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: - return VIR_DOMAIN_JOB_UNBOUNDED; - - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - return VIR_DOMAIN_JOB_COMPLETED; - - case QEMU_DOMAIN_JOB_STATUS_FAILED: - return VIR_DOMAIN_JOB_FAILED; - 
- case QEMU_DOMAIN_JOB_STATUS_CANCELED: - return VIR_DOMAIN_JOB_CANCELLED; - } - - return VIR_DOMAIN_JOB_NONE; -} - -int -qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, - virDomainJobInfoPtr info) -{ - info->type = qemuDomainJobStatusToType(jobInfo->status); - info->timeElapsed = jobInfo->timeElapsed; - - switch (jobInfo->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; - info->fileTotal = jobInfo->stats.mig.disk_total + - jobInfo->mirrorStats.total; - info->fileRemaining = jobInfo->stats.mig.disk_remaining + - (jobInfo->mirrorStats.total - - jobInfo->mirrorStats.transferred); - info->fileProcessed = jobInfo->stats.mig.disk_transferred + - jobInfo->mirrorStats.transferred; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - info->memTotal = jobInfo->stats.dump.total; - info->memProcessed = jobInfo->stats.dump.completed; - info->memRemaining = info->memTotal - info->memProcessed; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - info->fileTotal = jobInfo->stats.backup.total; - info->fileProcessed = jobInfo->stats.backup.transferred; - info->fileRemaining = info->fileTotal - info->fileProcessed; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: - break; - } - - info->dataTotal = info->memTotal + info->fileTotal; - info->dataRemaining = info->memRemaining + info->fileRemaining; - info->dataProcessed = info->memProcessed + info->fileProcessed; - - return 0; -} - - -static int -qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; - 
qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats; - virTypedParameterPtr par = NULL; - int maxpar = 0; - int npar = 0; - unsigned long long mirrorRemaining = mirrorStats->total - - mirrorStats->transferred; - - if (virTypedParamsAddInt(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_OPERATION, - jobInfo->operation) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) - goto error; - - if (jobInfo->timeDeltaSet && - jobInfo->timeElapsed > jobInfo->timeDelta && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_TIME_ELAPSED_NET, - jobInfo->timeElapsed - jobInfo->timeDelta) < 0) - goto error; - - if (stats->downtime_set && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DOWNTIME, - stats->downtime) < 0) - goto error; - - if (stats->downtime_set && - jobInfo->timeDeltaSet && - stats->downtime > jobInfo->timeDelta && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DOWNTIME_NET, - stats->downtime - jobInfo->timeDelta) < 0) - goto error; - - if (stats->setup_time_set && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_SETUP_TIME, - stats->setup_time) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DATA_TOTAL, - stats->ram_total + - stats->disk_total + - mirrorStats->total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DATA_PROCESSED, - stats->ram_transferred + - stats->disk_transferred + - mirrorStats->transferred) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DATA_REMAINING, - stats->ram_remaining + - stats->disk_remaining + - mirrorRemaining) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_TOTAL, - stats->ram_total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_PROCESSED, - stats->ram_transferred) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - 
VIR_DOMAIN_JOB_MEMORY_REMAINING, - stats->ram_remaining) < 0) - goto error; - - if (stats->ram_bps && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_BPS, - stats->ram_bps) < 0) - goto error; - - if (stats->ram_duplicate_set) { - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_CONSTANT, - stats->ram_duplicate) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_NORMAL, - stats->ram_normal) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES, - stats->ram_normal_bytes) < 0) - goto error; - } - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, - stats->ram_dirty_rate) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_ITERATION, - stats->ram_iteration) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_POSTCOPY_REQS, - stats->ram_postcopy_reqs) < 0) - goto error; - - if (stats->ram_page_size > 0 && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_PAGE_SIZE, - stats->ram_page_size) < 0) - goto error; - - /* The remaining stats are disk, mirror, or migration specific - * so if this is a SAVEDUMP, we can just skip them */ - if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) - goto done; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_TOTAL, - stats->disk_total + - mirrorStats->total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_PROCESSED, - stats->disk_transferred + - mirrorStats->transferred) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_REMAINING, - stats->disk_remaining + - mirrorRemaining) < 0) - goto error; - - if (stats->disk_bps && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_BPS, - stats->disk_bps) < 0) - goto error; - - if (stats->xbzrle_set) { - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - 
VIR_DOMAIN_JOB_COMPRESSION_CACHE, - stats->xbzrle_cache_size) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_BYTES, - stats->xbzrle_bytes) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_PAGES, - stats->xbzrle_pages) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES, - stats->xbzrle_cache_miss) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW, - stats->xbzrle_overflow) < 0) - goto error; - } - - if (stats->cpu_throttle_percentage && - virTypedParamsAddInt(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE, - stats->cpu_throttle_percentage) < 0) - goto error; - - done: - *type = qemuDomainJobStatusToType(jobInfo->status); - *params = par; - *nparams = npar; - return 0; - - error: - virTypedParamsFree(par, npar); - return -1; -} - - -static int -qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - qemuMonitorDumpStats *stats = &jobInfo->stats.dump; - virTypedParameterPtr par = NULL; - int maxpar = 0; - int npar = 0; - - if (virTypedParamsAddInt(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_OPERATION, - jobInfo->operation) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_TOTAL, - stats->total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_PROCESSED, - stats->completed) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_REMAINING, - stats->total - stats->completed) < 0) - goto error; - - *type = qemuDomainJobStatusToType(jobInfo->status); - *params = par; - *nparams = npar; - return 0; - - error: - virTypedParamsFree(par, npar); - return -1; -} - - -static int 
-qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - qemuDomainBackupStats *stats = &jobInfo->stats.backup; - g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); - - if (virTypedParamListAddInt(par, jobInfo->operation, - VIR_DOMAIN_JOB_OPERATION) < 0) - return -1; - - if (virTypedParamListAddULLong(par, jobInfo->timeElapsed, - VIR_DOMAIN_JOB_TIME_ELAPSED) < 0) - return -1; - - if (stats->transferred > 0 || stats->total > 0) { - if (virTypedParamListAddULLong(par, stats->total, - VIR_DOMAIN_JOB_DISK_TOTAL) < 0) - return -1; - - if (virTypedParamListAddULLong(par, stats->transferred, - VIR_DOMAIN_JOB_DISK_PROCESSED) < 0) - return -1; - - if (virTypedParamListAddULLong(par, stats->total - stats->transferred, - VIR_DOMAIN_JOB_DISK_REMAINING) < 0) - return -1; - } - - if (stats->tmp_used > 0 || stats->tmp_total > 0) { - if (virTypedParamListAddULLong(par, stats->tmp_used, - VIR_DOMAIN_JOB_DISK_TEMP_USED) < 0) - return -1; - - if (virTypedParamListAddULLong(par, stats->tmp_total, - VIR_DOMAIN_JOB_DISK_TEMP_TOTAL) < 0) - return -1; - } - - if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && - virTypedParamListAddBoolean(par, - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, - VIR_DOMAIN_JOB_SUCCESS) < 0) - return -1; - - if (jobInfo->errmsg && - virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) - return -1; - - *nparams = virTypedParamListStealParams(par, params); - *type = qemuDomainJobStatusToType(jobInfo->status); - return 0; -} - - -int -qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - switch (jobInfo->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); - - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - return qemuDomainDumpJobInfoToParams(jobInfo, 
type, params, nparams); - - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); - - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("invalid job statistics type")); - break; - - default: - virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); - break; - } - - return -1; -} - - void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, virDomainObjPtr obj, diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index fcbe3fe112..84b60a466a 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -215,9 +215,6 @@ const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, const char *phase); -void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, - virDomainObjPtr vm); - int qemuDomainObjBeginJob(virQEMUDriverPtr driver, virDomainObjPtr obj, qemuDomainJob job) @@ -258,20 +255,6 @@ void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj); void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj); -int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) - ATTRIBUTE_NONNULL(1); -int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) - ATTRIBUTE_NONNULL(1); -int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, - virDomainJobInfoPtr info) - ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); -int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) - ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) - ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4); - bool qemuDomainTrackJob(qemuDomainJob job); void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); -- 2.25.1

As `qemuDomainJobInfo` had attributes specific to qemu hypervisor's jobs, we moved the attributes `current` and `completed` from `qemuDomainJobObj` to its `privateData` structure. In this process, two callback functions, `setJobInfoOperation` and `currentJobInfoInit`, were introduced to qemuDomainJob's callback structure. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_backup.c | 22 ++++++----- src/qemu/qemu_domain.c | 29 +++++++++++++- src/qemu/qemu_domain.h | 50 ++++++++++++++++++++++++ src/qemu/qemu_domainjob.c | 6 +-- src/qemu/qemu_domainjob.h | 67 ++++---------------------- src/qemu/qemu_driver.c | 46 ++++++++++++++-------- src/qemu/qemu_migration.c | 62 ++++++++++++++++------------- src/qemu/qemu_migration_cookie.c | 8 ++-- src/qemu/qemu_process.c | 32 ++++++++------- src/qemu/qemu_snapshot.c | 4 +- 10 files changed, 187 insertions(+), 139 deletions(-) diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index a402730d38..1822c6f267 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -529,20 +529,21 @@ qemuBackupJobTerminate(virDomainObjPtr vm, { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; size_t i; - qemuDomainJobInfoUpdateTime(priv->job.current); + qemuDomainJobInfoUpdateTime(jobPriv->current); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); - priv->job.completed = qemuDomainJobInfoCopy(priv->job.current); - priv->job.completed->stats.backup.total = priv->backup->push_total; - priv->job.completed->stats.backup.transferred = priv->backup->push_transferred; - priv->job.completed->stats.backup.tmp_used = priv->backup->pull_tmp_used; - priv->job.completed->stats.backup.tmp_total = priv->backup->pull_tmp_total; + jobPriv->completed->stats.backup.total = priv->backup->push_total; + 
jobPriv->completed->stats.backup.transferred = priv->backup->push_transferred; + jobPriv->completed->stats.backup.tmp_used = priv->backup->pull_tmp_used; + jobPriv->completed->stats.backup.tmp_total = priv->backup->pull_tmp_total; - priv->job.completed->status = jobstatus; - priv->job.completed->errmsg = g_strdup(priv->backup->errmsg); + jobPriv->completed->status = jobstatus; + jobPriv->completed->errmsg = g_strdup(priv->backup->errmsg); qemuDomainEventEmitJobCompleted(priv->driver, vm); @@ -694,6 +695,7 @@ qemuBackupBegin(virDomainObjPtr vm, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); g_autoptr(virDomainBackupDef) def = NULL; g_autofree char *suffix = NULL; @@ -745,7 +747,7 @@ qemuBackupBegin(virDomainObjPtr vm, qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_MODIFY))); - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 4bee56a1be..42efbc3230 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -543,6 +543,8 @@ qemuJobFreePrivate(void *opaque) return; qemuMigrationParamsFree(priv->migParams); + g_clear_pointer(&priv->current, qemuDomainJobInfoFree); + g_clear_pointer(&priv->completed, qemuDomainJobInfoFree); VIR_FREE(priv); } @@ -556,6 +558,7 @@ qemuJobResetPrivate(void *opaque) priv->spiceMigrated = false; priv->dumpCompleted = false; qemuMigrationParamsFree(priv->migParams); + g_clear_pointer(&priv->current, qemuDomainJobInfoFree); priv->migParams = NULL; } @@ -630,20 +633,40 @@ qemuDomainFormatJobPrivate(virBufferPtr buf, return 0; } +static void +qemuDomainCurrentJobInfoInit(qemuDomainJobObjPtr job, 
+ unsigned long long now) +{ + qemuDomainJobPrivatePtr priv = job->privateData; + priv->current = g_new0(qemuDomainJobInfo, 1); + priv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->current->started = now; + +} + +static void +qemuDomainJobInfoSetOperation(qemuDomainJobObjPtr job, + virDomainJobOperation operation) +{ + qemuDomainJobPrivatePtr priv = job->privateData; + priv->current->operation = operation; +} + void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virObjectEventPtr event; virTypedParameterPtr params = NULL; int nparams = 0; int type; - if (!priv->job.completed) + if (!jobPriv->completed) return; - if (qemuDomainJobInfoToParams(priv->job.completed, &type, + if (qemuDomainJobInfoToParams(jobPriv->completed, &type, ¶ms, &nparams) < 0) { VIR_WARN("Could not get stats for completed job; domain %s", vm->def->name); @@ -760,6 +783,8 @@ static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .resetJobPrivate = qemuJobResetPrivate, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, + .setJobInfoOperation = qemuDomainJobInfoSetOperation, + .currentJobInfoInit = qemuDomainCurrentJobInfoInit, }; /** diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index 73085b1d91..f77a377e30 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -483,6 +483,52 @@ struct _qemuDomainXmlNsDef { char **capsdel; }; +typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats; +typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr; +struct _qemuDomainMirrorStats { + unsigned long long transferred; + unsigned long long total; +}; + +typedef struct _qemuDomainBackupStats qemuDomainBackupStats; +struct _qemuDomainBackupStats { + unsigned long long transferred; + unsigned long long total; + unsigned long long tmp_used; + unsigned long long tmp_total; +}; + +typedef struct 
_qemuDomainJobInfo qemuDomainJobInfo; +typedef qemuDomainJobInfo *qemuDomainJobInfoPtr; +struct _qemuDomainJobInfo { + qemuDomainJobStatus status; + virDomainJobOperation operation; + unsigned long long started; /* When the async job started */ + unsigned long long stopped; /* When the domain's CPUs were stopped */ + unsigned long long sent; /* When the source sent status info to the + destination (only for migrations). */ + unsigned long long received; /* When the destination host received status + info from the source (migrations only). */ + /* Computed values */ + unsigned long long timeElapsed; + long long timeDelta; /* delta = received - sent, i.e., the difference + between the source and the destination time plus + the time between the end of Perform phase on the + source and the beginning of Finish phase on the + destination. */ + bool timeDeltaSet; + /* Raw values from QEMU */ + qemuDomainJobStatsType statsType; + union { + qemuMonitorMigrationStats mig; + qemuMonitorDumpStats dump; + qemuDomainBackupStats backup; + } stats; + qemuDomainMirrorStats mirrorStats; + + char *errmsg; /* optional error message for failed completed jobs */ +}; + typedef struct _qemuDomainJobPrivate qemuDomainJobPrivate; typedef qemuDomainJobPrivate *qemuDomainJobPrivatePtr; struct _qemuDomainJobPrivate { @@ -491,6 +537,8 @@ struct _qemuDomainJobPrivate { bool spiceMigrated; /* spice migration completed */ bool dumpCompleted; /* dump completed */ qemuMigrationParamsPtr migParams; + qemuDomainJobInfoPtr current; /* async job progress data */ + qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */ }; @@ -500,6 +548,8 @@ void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, void qemuDomainJobInfoFree(qemuDomainJobInfoPtr info); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree); + qemuDomainJobInfoPtr qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info); diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 
8f8daf0fcb..c100262e49 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -417,13 +417,11 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - priv->job.current = g_new0(qemuDomainJobInfo, 1); - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->job.cb->currentJobInfoInit(&priv->job, now); priv->job.asyncJob = asyncJob; priv->job.asyncOwner = virThreadSelfID(); priv->job.asyncOwnerAPI = virThreadJobGet(); priv->job.asyncStarted = now; - priv->job.current->started = now; } } @@ -589,7 +587,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, return -1; priv = obj->privateData; - priv->job.current->operation = operation; + priv->job.cb->setJobInfoOperation(&priv->job, operation); priv->job.apiFlags = apiFlags; return 0; } diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 84b60a466a..c1c68719a6 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -19,7 +19,6 @@ #pragma once #include <glib-object.h> -#include "qemu_monitor.h" #define JOB_MASK(job) (job == 0 ? 
0 : 1 << (job - 1)) #define QEMU_JOB_DEFAULT_MASK \ @@ -99,61 +98,6 @@ typedef enum { QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP, } qemuDomainJobStatsType; - -typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats; -typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr; -struct _qemuDomainMirrorStats { - unsigned long long transferred; - unsigned long long total; -}; - -typedef struct _qemuDomainBackupStats qemuDomainBackupStats; -struct _qemuDomainBackupStats { - unsigned long long transferred; - unsigned long long total; - unsigned long long tmp_used; - unsigned long long tmp_total; -}; - -typedef struct _qemuDomainJobInfo qemuDomainJobInfo; -typedef qemuDomainJobInfo *qemuDomainJobInfoPtr; -struct _qemuDomainJobInfo { - qemuDomainJobStatus status; - virDomainJobOperation operation; - unsigned long long started; /* When the async job started */ - unsigned long long stopped; /* When the domain's CPUs were stopped */ - unsigned long long sent; /* When the source sent status info to the - destination (only for migrations). */ - unsigned long long received; /* When the destination host received status - info from the source (migrations only). */ - /* Computed values */ - unsigned long long timeElapsed; - long long timeDelta; /* delta = received - sent, i.e., the difference - between the source and the destination time plus - the time between the end of Perform phase on the - source and the beginning of Finish phase on the - destination. 
*/ - bool timeDeltaSet; - /* Raw values from QEMU */ - qemuDomainJobStatsType statsType; - union { - qemuMonitorMigrationStats mig; - qemuMonitorDumpStats dump; - qemuDomainBackupStats backup; - } stats; - qemuDomainMirrorStats mirrorStats; - - char *errmsg; /* optional error message for failed completed jobs */ -}; - -void -qemuDomainJobInfoFree(qemuDomainJobInfoPtr info); - -G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree); - -qemuDomainJobInfoPtr -qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info); - typedef struct _qemuDomainJobObj qemuDomainJobObj; typedef qemuDomainJobObj *qemuDomainJobObjPtr; @@ -163,9 +107,12 @@ typedef void (*qemuDomainObjPrivateJobReset)(void *); typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, qemuDomainJobObjPtr, virDomainObjPtr); -typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr, - qemuDomainJobObjPtr, +typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr, qemuDomainJobObjPtr, virDomainObjPtr); +typedef void (*qemuDomainObjJobInfoSetOperation)(qemuDomainJobObjPtr, + virDomainJobOperation); +typedef void (*qemuDomainObjCurrentJobInfoInit)(qemuDomainJobObjPtr, + unsigned long long); typedef struct _qemuDomainObjPrivateJobCallbacks qemuDomainObjPrivateJobCallbacks; typedef qemuDomainObjPrivateJobCallbacks *qemuDomainObjPrivateJobCallbacksPtr; @@ -175,6 +122,8 @@ struct _qemuDomainObjPrivateJobCallbacks { qemuDomainObjPrivateJobReset resetJobPrivate; qemuDomainObjPrivateJobFormat formatJob; qemuDomainObjPrivateJobParse parseJob; + qemuDomainObjJobInfoSetOperation setJobInfoOperation; + qemuDomainObjCurrentJobInfoInit currentJobInfoInit; }; struct _qemuDomainJobObj { @@ -200,8 +149,6 @@ struct _qemuDomainJobObj { unsigned long long asyncStarted; /* When the current async job started */ int phase; /* Job phase (mainly for migrations) */ unsigned long long mask; /* Jobs allowed during async job */ - qemuDomainJobInfoPtr current; /* async job progress data */ - qemuDomainJobInfoPtr 
completed; /* statistics data of a recently completed job */ bool abortJob; /* abort of the job requested */ char *error; /* job event completion error */ unsigned long apiFlags; /* flags passed to the API which started the async job */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 3636716cee..806577a70a 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2694,6 +2694,7 @@ qemuDomainGetControlInfo(virDomainPtr dom, { virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; int ret = -1; virCheckFlags(0, -1); @@ -2708,6 +2709,7 @@ qemuDomainGetControlInfo(virDomainPtr dom, goto cleanup; priv = vm->privateData; + jobPriv = priv->job.privateData; memset(info, 0, sizeof(*info)); @@ -2717,9 +2719,9 @@ qemuDomainGetControlInfo(virDomainPtr dom, } else if (priv->job.active) { if (virTimeMillisNow(&info->stateTime) < 0) goto cleanup; - if (priv->job.current) { + if (jobPriv->current) { info->state = VIR_DOMAIN_CONTROL_JOB; - info->stateTime -= priv->job.current->started; + info->stateTime -= jobPriv->current->started; } else { if (priv->monStart > 0) { info->state = VIR_DOMAIN_CONTROL_OCCUPIED; @@ -2762,6 +2764,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, int ret = -1; virObjectEventPtr event = NULL; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virQEMUSaveDataPtr data = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL; @@ -2778,7 +2781,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, goto endjob; } - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Pause */ if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { @@ -3082,7 +3085,7 @@ qemuDumpWaitForCompletion(virDomainObjPtr vm) return -1; } - if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { + if (jobPriv->current->stats.dump.status == 
QEMU_MONITOR_DUMP_STATUS_FAILED) { if (priv->job.error) virReportError(VIR_ERR_OPERATION_FAILED, _("memory-only dump failed: %s"), @@ -3093,7 +3096,7 @@ qemuDumpWaitForCompletion(virDomainObjPtr vm) return -1; } - qemuDomainJobInfoUpdateTime(priv->job.current); + qemuDomainJobInfoUpdateTime(jobPriv->current); return 0; } @@ -3107,6 +3110,7 @@ qemuDumpToFd(virQEMUDriverPtr driver, const char *dumpformat) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; bool detach = false; int ret = -1; @@ -3122,9 +3126,9 @@ qemuDumpToFd(virQEMUDriverPtr driver, return -1; if (detach) - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; else - g_clear_pointer(&priv->job.current, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->current, qemuDomainJobInfoFree); if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) return -1; @@ -3262,6 +3266,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv = NULL; + qemuDomainJobPrivatePtr jobPriv; bool resume = false, paused = false; int ret = -1; virObjectEventPtr event = NULL; @@ -3286,7 +3291,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, goto endjob; priv = vm->privateData; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv = priv->job.privateData; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Migrate will always stop the VM, so the resume condition is independent of whether the stop command is issued. */ @@ -6499,6 +6505,7 @@ qemuDomainObjStart(virConnectPtr conn, bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0; unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; start_flags |= start_paused ? 
VIR_QEMU_PROCESS_START_PAUSED : 0; start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0; @@ -6522,8 +6529,8 @@ qemuDomainObjStart(virConnectPtr conn, } vm->hasManagedSave = false; } else { - virDomainJobOperation op = priv->job.current->operation; - priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE; + virDomainJobOperation op = jobPriv->current->operation; + jobPriv->current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE; ret = qemuDomainObjRestore(conn, driver, vm, managed_save, start_paused, bypass_cache, asyncJob); @@ -6541,7 +6548,7 @@ qemuDomainObjStart(virConnectPtr conn, return ret; } else { VIR_WARN("Ignoring incomplete managed state %s", managed_save); - priv->job.current->operation = op; + jobPriv->current->operation = op; vm->hasManagedSave = false; } } @@ -12600,13 +12607,14 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, qemuDomainJobInfoPtr *jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; int ret = -1; *jobInfo = NULL; if (completed) { - if (priv->job.completed && !priv->job.current) - *jobInfo = qemuDomainJobInfoCopy(priv->job.completed); + if (jobPriv->completed && !jobPriv->current) + *jobInfo = qemuDomainJobInfoCopy(jobPriv->completed); return 0; } @@ -12624,11 +12632,11 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, if (virDomainObjCheckActive(vm) < 0) goto cleanup; - if (!priv->job.current) { + if (!jobPriv->current) { ret = 0; goto cleanup; } - *jobInfo = qemuDomainJobInfoCopy(priv->job.current); + *jobInfo = qemuDomainJobInfoCopy(jobPriv->current); switch ((*jobInfo)->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: @@ -12703,6 +12711,7 @@ qemuDomainGetJobStats(virDomainPtr dom, virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; g_autoptr(qemuDomainJobInfo) jobInfo = NULL; bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED); int 
ret = -1; @@ -12717,6 +12726,7 @@ qemuDomainGetJobStats(virDomainPtr dom, goto cleanup; priv = vm->privateData; + jobPriv = priv->job.privateData; if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) goto cleanup; @@ -12732,7 +12742,7 @@ qemuDomainGetJobStats(virDomainPtr dom, ret = qemuDomainJobInfoToParams(jobInfo, type, params, nparams); if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED)) - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); cleanup: virDomainObjEndAPI(&vm); @@ -12764,6 +12774,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; int reason; if (!(vm = qemuDomainObjFromDomain(dom))) @@ -12779,6 +12790,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) goto endjob; priv = vm->privateData; + jobPriv = priv->job.privateData; switch (priv->job.asyncJob) { case QEMU_ASYNC_JOB_NONE: @@ -12799,7 +12811,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) break; case QEMU_ASYNC_JOB_MIGRATION_OUT: - if ((priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || + if ((jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY))) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index a6f1da97bd..a45f87137f 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1008,6 +1008,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; int port; size_t i; unsigned long long mirror_speed = speed; @@ -1052,7 +1053,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, return -1; if (priv->job.abortJob) { - 
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); @@ -1070,7 +1071,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, } qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - priv->job.current); + jobPriv->current); /* Okay, all disks are ready. Modify migrate_flags */ *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK | @@ -1550,7 +1551,8 @@ qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPtr jobInfo = jobPriv->current; char *error = NULL; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int ret = -1; @@ -1620,7 +1622,8 @@ qemuMigrationAnyCompleted(virQEMUDriverPtr driver, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPtr jobInfo = jobPriv->current; int pauseReason; if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0) @@ -1711,7 +1714,8 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPtr jobInfo = jobPriv->current; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int rv; @@ -1743,9 +1747,9 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobInfoUpdateDowntime(jobInfo); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); - 
priv->job.completed = qemuDomainJobInfoCopy(jobInfo); - priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); + jobPriv->completed = qemuDomainJobInfoCopy(jobInfo); + jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) @@ -3017,16 +3021,16 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, return -1; if (retcode == 0) - jobInfo = priv->job.completed; + jobInfo = jobPriv->completed; else - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); /* Update times with the values sent by the destination daemon */ if (mig->jobInfo && jobInfo) { int reason; /* We need to refresh migration statistics after a completed post-copy - * migration since priv->job.completed contains obsolete data from the + * migration since jobPriv->completed contains obsolete data from the * time we switched to post-copy mode. 
*/ if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && @@ -3478,6 +3482,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, int ret = -1; unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; g_autoptr(qemuMigrationCookie) mig = NULL; g_autofree char *tlsAlias = NULL; qemuMigrationIOThreadPtr iothread = NULL; @@ -3635,7 +3640,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, /* explicitly do this *after* we entered the monitor, * as this is a critical section so we are guaranteed * priv->job.abortJob will not change */ - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); @@ -3740,7 +3745,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, * resume it now once we finished all block jobs and wait for the real * end of the migration. 
*/ - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { + if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { if (qemuMigrationSrcContinue(driver, vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) @@ -3768,11 +3773,11 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, goto error; } - if (priv->job.completed) { - priv->job.completed->stopped = priv->job.current->stopped; - qemuDomainJobInfoUpdateTime(priv->job.completed); - qemuDomainJobInfoUpdateDowntime(priv->job.completed); - ignore_value(virTimeMillisNow(&priv->job.completed->sent)); + if (jobPriv->completed) { + jobPriv->completed->stopped = jobPriv->current->stopped; + qemuDomainJobInfoUpdateTime(jobPriv->completed); + qemuDomainJobInfoUpdateDowntime(jobPriv->completed); + ignore_value(virTimeMillisNow(&jobPriv->completed->sent)); } cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK | @@ -3800,7 +3805,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (virDomainObjIsActive(vm)) { if (cancel && - priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && + jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuMonitorMigrateCancel(priv->mon); @@ -3813,8 +3818,8 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn); - if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + if (jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; } if (iothread) @@ -5021,7 +5026,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, : VIR_MIGRATION_PHASE_FINISH2); qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK | 
QEMU_MIGRATION_COOKIE_STATS | @@ -5113,7 +5118,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, goto endjob; } - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) inPostCopy = true; if (!(flags & VIR_MIGRATE_PAUSED)) { @@ -5227,9 +5232,9 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, if (dom) { if (jobInfo) { - priv->job.completed = g_steal_pointer(&jobInfo); - priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; - priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + jobPriv->completed = g_steal_pointer(&jobInfo); + jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobPriv->completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; } if (qemuMigrationBakeCookie(mig, driver, vm, @@ -5242,7 +5247,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, * is obsolete anyway. */ if (inPostCopy) - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); } qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, @@ -5471,6 +5476,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, unsigned long apiFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virDomainJobOperation op; unsigned long long mask; @@ -5487,7 +5493,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) return -1; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; qemuDomainObjSetAsyncJobMask(vm, mask); return 0; diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index cef2555988..6cf1c22812 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -509,12 +509,13 @@ qemuMigrationCookieAddStatistics(qemuMigrationCookiePtr 
mig, virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (!priv->job.completed) + if (!jobPriv->completed) return 0; g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree); - mig->jobInfo = qemuDomainJobInfoCopy(priv->job.completed); + mig->jobInfo = qemuDomainJobInfoCopy(jobPriv->completed); mig->flags |= QEMU_MIGRATION_COOKIE_STATS; @@ -1465,6 +1466,7 @@ qemuMigrationEatCookie(virQEMUDriverPtr driver, unsigned int flags) { g_autoptr(qemuMigrationCookie) mig = NULL; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; /* Parse & validate incoming cookie (if any) */ if (cookiein && cookieinlen && @@ -1513,7 +1515,7 @@ qemuMigrationEatCookie(virQEMUDriverPtr driver, } if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo) - mig->jobInfo->operation = priv->job.current->operation; + mig->jobInfo->operation = jobPriv->current->operation; return g_steal_pointer(&mig); } diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 55b31382f3..aa36264b6c 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -658,6 +658,7 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainEventSuspendedDetailType detail; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virObjectLock(vm); @@ -669,7 +670,7 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING && !priv->pausedShutdown) { if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) { - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) reason = VIR_DOMAIN_PAUSED_POSTCOPY; else reason = VIR_DOMAIN_PAUSED_MIGRATION; @@ -681,8 +682,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, vm->def->name, 
virDomainPausedReasonTypeToString(reason), detail); - if (priv->job.current) - ignore_value(virTimeMillisNow(&priv->job.current->stopped)); + if (jobPriv->current) + ignore_value(virTimeMillisNow(&jobPriv->current->stopped)); if (priv->signalStop) virDomainObjBroadcast(vm); @@ -1650,6 +1651,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, void *opaque) { qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; virQEMUDriverPtr driver = opaque; virObjectEventPtr event = NULL; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); @@ -1662,12 +1664,13 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, qemuMonitorMigrationStatusTypeToString(status)); priv = vm->privateData; + jobPriv = priv->job.privateData; if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { VIR_DEBUG("got MIGRATION event without a migration job"); goto cleanup; } - priv->job.current->stats.mig.status = status; + jobPriv->current->stats.mig.status = status; virDomainObjBroadcast(vm); if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY && @@ -1748,13 +1751,13 @@ qemuProcessHandleDumpCompleted(qemuMonitorPtr mon G_GNUC_UNUSED, goto cleanup; } jobPriv->dumpCompleted = true; - priv->job.current->stats.dump = *stats; + jobPriv->current->stats.dump = *stats; priv->job.error = g_strdup(error); /* Force error if extracting the DUMP_COMPLETED status failed */ if (!error && status < 0) { priv->job.error = g_strdup(virGetLastErrorMessage()); - priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; + jobPriv->current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; } virDomainObjBroadcast(vm); @@ -3268,6 +3271,7 @@ int qemuProcessStopCPUs(virQEMUDriverPtr driver, { int ret = -1; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; VIR_FREE(priv->lockState); @@ -3286,8 +3290,8 @@ int qemuProcessStopCPUs(virQEMUDriverPtr driver, /* de-activate netdevs after stopping CPUs */ 
ignore_value(qemuInterfaceStopDevices(vm->def)); - if (priv->job.current) - ignore_value(virTimeMillisNow(&priv->job.current->stopped)); + if (jobPriv->current) + ignore_value(virTimeMillisNow(&jobPriv->current->stopped)); /* The STOP event handler will change the domain state with the reason * saved in priv->pausedReason and it will also emit corresponding domain @@ -3584,6 +3588,7 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, unsigned int *stopFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virDomainState state; int reason; unsigned long long now; @@ -3652,11 +3657,11 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, /* We reset the job parameters for backup so that the job will look * active. This is possible because we are able to recover the state * of blockjobs and also the backup job allows all sub-job types */ - priv->job.current = g_new0(qemuDomainJobInfo, 1); - priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; - priv->job.current->started = now; + jobPriv->current = g_new0(qemuDomainJobInfo, 1); + jobPriv->current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobPriv->current->started = now; break; case QEMU_ASYNC_JOB_NONE: @@ -3761,7 +3766,6 @@ qemuDomainPerfRestart(virDomainObjPtr vm) return 0; } - static void qemuProcessReconnectCheckMemAliasOrderMismatch(virDomainObjPtr vm) { diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 1e8ea80b22..9f4a146861 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -1305,11 +1305,13 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, /* do the memory snapshot if necessary */ if (memory) { + qemuDomainJobPrivatePtr jobPriv = 
priv->job.privateData; + /* check if migration is possible */ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* allow the migration job to be cancelled or the domain to be paused */ qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | -- 2.25.1

Since the attribute `jobs_queued` needs to be accessed by jobs, the callback functions `getJobsQueued`, `increaseJobsQueued` and `decreaseJobsQueued` were added to access it.
job != QEMU_JOB_DESTROY) && cfg->maxQueuedJobs && - priv->jobs_queued > cfg->maxQueuedJobs) { + priv->job.cb->getJobsQueued(obj) > cfg->maxQueuedJobs) { goto error; } @@ -502,7 +502,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, } ret = -2; } else if (cfg->maxQueuedJobs && - priv->jobs_queued > cfg->maxQueuedJobs) { + priv->job.cb->getJobsQueued(obj) > cfg->maxQueuedJobs) { if (blocker && agentBlocker) { virReportError(VIR_ERR_OPERATION_FAILED, _("cannot acquire state change " @@ -532,7 +532,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, } cleanup: - priv->jobs_queued--; + priv->job.cb->decreaseJobsQueued(obj); return ret; } @@ -653,7 +653,7 @@ qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJob job = priv->job.active; - priv->jobs_queued--; + priv->job.cb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), @@ -674,7 +674,7 @@ qemuDomainObjEndAgentJob(virDomainObjPtr obj) qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainAgentJob agentJob = priv->job.agentActive; - priv->jobs_queued--; + priv->job.cb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)", qemuDomainAgentJobTypeToString(agentJob), @@ -692,7 +692,7 @@ qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; - priv->jobs_queued--; + priv->job.cb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)", qemuDomainAsyncJobTypeToString(priv->job.asyncJob), diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index c1c68719a6..d3bc59cbcb 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -113,6 +113,9 @@ typedef void (*qemuDomainObjJobInfoSetOperation)(qemuDomainJobObjPtr, virDomainJobOperation); typedef void (*qemuDomainObjCurrentJobInfoInit)(qemuDomainJobObjPtr, unsigned long long); 
+typedef int (*qemuDomainObjGetJobsQueued)(virDomainObjPtr); +typedef void (*qemuDomainObjIncreaseJobsQueued)(virDomainObjPtr); +typedef void (*qemuDomainObjDecreaseJobsQueued)(virDomainObjPtr); typedef struct _qemuDomainObjPrivateJobCallbacks qemuDomainObjPrivateJobCallbacks; typedef qemuDomainObjPrivateJobCallbacks *qemuDomainObjPrivateJobCallbacksPtr; @@ -124,6 +127,9 @@ struct _qemuDomainObjPrivateJobCallbacks { qemuDomainObjPrivateJobParse parseJob; qemuDomainObjJobInfoSetOperation setJobInfoOperation; qemuDomainObjCurrentJobInfoInit currentJobInfoInit; + qemuDomainObjGetJobsQueued getJobsQueued; + qemuDomainObjIncreaseJobsQueued increaseJobsQueued; + qemuDomainObjDecreaseJobsQueued decreaseJobsQueued; }; struct _qemuDomainJobObj { -- 2.25.1

Referencing `maxQueuedJobs` required access to the qemu driver's config. To avoid jobs accessing it directly, we add the callback function `getMaxQueuedJobs` to the job's private-callback structure.
const char *blocker = NULL; const char *agentBlocker = NULL; int ret = -1; @@ -370,8 +369,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, retry: if ((!async && job != QEMU_JOB_DESTROY) && - cfg->maxQueuedJobs && - priv->job.cb->getJobsQueued(obj) > cfg->maxQueuedJobs) { + priv->job.cb->getMaxQueuedJobs(obj) && + priv->job.cb->getJobsQueued(obj) > priv->job.cb->getMaxQueuedJobs(obj)) { goto error; } @@ -501,8 +500,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, _("cannot acquire state change lock")); } ret = -2; - } else if (cfg->maxQueuedJobs && - priv->job.cb->getJobsQueued(obj) > cfg->maxQueuedJobs) { + } else if (priv->job.cb->getMaxQueuedJobs(obj) && + priv->job.cb->getJobsQueued(obj) > priv->job.cb->getMaxQueuedJobs(obj)) { if (blocker && agentBlocker) { virReportError(VIR_ERR_OPERATION_FAILED, _("cannot acquire state change " diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index d3bc59cbcb..f3d9218ec0 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -116,6 +116,7 @@ typedef void (*qemuDomainObjCurrentJobInfoInit)(qemuDomainJobObjPtr, typedef int (*qemuDomainObjGetJobsQueued)(virDomainObjPtr); typedef void (*qemuDomainObjIncreaseJobsQueued)(virDomainObjPtr); typedef void (*qemuDomainObjDecreaseJobsQueued)(virDomainObjPtr); +typedef int (*qemuDomainObjGetMaxQueuedJobs)(virDomainObjPtr); typedef struct _qemuDomainObjPrivateJobCallbacks qemuDomainObjPrivateJobCallbacks; typedef qemuDomainObjPrivateJobCallbacks *qemuDomainObjPrivateJobCallbacksPtr; @@ -130,6 +131,7 @@ struct _qemuDomainObjPrivateJobCallbacks { qemuDomainObjGetJobsQueued getJobsQueued; qemuDomainObjIncreaseJobsQueued increaseJobsQueued; qemuDomainObjDecreaseJobsQueued decreaseJobsQueued; + qemuDomainObjGetMaxQueuedJobs getMaxQueuedJobs; }; struct _qemuDomainJobObj { -- 2.25.1

The `qemuDomainJobPrivateJobCallbacks` structure was nested inside the `qemuDomainJobPrivateCallbacks` structure, so that in the future we may add generic callbacks that we can't really categorize into the latter callback structure.
qemuDomainObjPrivateJobCallbacksPtr cb) + qemuDomainJobPrivateCallbacksPtr cb) { memset(job, 0, sizeof(*job)); job->cb = cb; - if (!(job->privateData = job->cb->allocJobPrivate())) + if (!(job->privateData = job->cb->jobcb->allocJobPrivate())) return -1; if (virCondInit(&job->cond) < 0) { - job->cb->freeJobPrivate(job->privateData); + job->cb->jobcb->freeJobPrivate(job->privateData); return -1; } if (virCondInit(&job->asyncCond) < 0) { - job->cb->freeJobPrivate(job->privateData); + job->cb->jobcb->freeJobPrivate(job->privateData); virCondDestroy(&job->cond); return -1; } @@ -171,7 +171,7 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) job->mask = QEMU_JOB_DEFAULT_MASK; job->abortJob = false; VIR_FREE(job->error); - job->cb->resetJobPrivate(job->privateData); + job->cb->jobcb->resetJobPrivate(job->privateData); job->apiFlags = 0; } @@ -190,7 +190,7 @@ qemuDomainObjRestoreJob(virDomainObjPtr obj, job->privateData = g_steal_pointer(&priv->job.privateData); job->apiFlags = priv->job.apiFlags; - if (!(priv->job.privateData = priv->job.cb->allocJobPrivate())) + if (!(priv->job.privateData = priv->job.cb->jobcb->allocJobPrivate())) return -1; job->cb = priv->job.cb; @@ -204,7 +204,7 @@ qemuDomainObjFreeJob(qemuDomainJobObjPtr job) { qemuDomainObjResetJob(job); qemuDomainObjResetAsyncJob(job); - job->cb->freeJobPrivate(job->privateData); + job->cb->jobcb->freeJobPrivate(job->privateData); virCondDestroy(&job->cond); virCondDestroy(&job->asyncCond); } @@ -364,13 +364,13 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, if (virTimeMillisNow(&now) < 0) return -1; - priv->job.cb->increaseJobsQueued(obj); + priv->job.cb->jobcb->increaseJobsQueued(obj); then = now + QEMU_JOB_WAIT_TIME; retry: if ((!async && job != QEMU_JOB_DESTROY) && - priv->job.cb->getMaxQueuedJobs(obj) && - priv->job.cb->getJobsQueued(obj) > priv->job.cb->getMaxQueuedJobs(obj)) { + priv->job.cb->jobcb->getMaxQueuedJobs(obj) && + priv->job.cb->jobcb->getJobsQueued(obj) > 
priv->job.cb->jobcb->getMaxQueuedJobs(obj)) { goto error; } @@ -416,7 +416,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - priv->job.cb->currentJobInfoInit(&priv->job, now); + priv->job.cb->jobcb->currentJobInfoInit(&priv->job, now); priv->job.asyncJob = asyncJob; priv->job.asyncOwner = virThreadSelfID(); priv->job.asyncOwnerAPI = virThreadJobGet(); @@ -500,8 +500,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, _("cannot acquire state change lock")); } ret = -2; - } else if (priv->job.cb->getMaxQueuedJobs(obj) && - priv->job.cb->getJobsQueued(obj) > priv->job.cb->getMaxQueuedJobs(obj)) { + } else if (priv->job.cb->jobcb->getMaxQueuedJobs(obj) && + priv->job.cb->jobcb->getJobsQueued(obj) > priv->job.cb->jobcb->getMaxQueuedJobs(obj)) { if (blocker && agentBlocker) { virReportError(VIR_ERR_OPERATION_FAILED, _("cannot acquire state change " @@ -531,7 +531,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, } cleanup: - priv->job.cb->decreaseJobsQueued(obj); + priv->job.cb->jobcb->decreaseJobsQueued(obj); return ret; } @@ -586,7 +586,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, return -1; priv = obj->privateData; - priv->job.cb->setJobInfoOperation(&priv->job, operation); + priv->job.cb->jobcb->setJobInfoOperation(&priv->job, operation); priv->job.apiFlags = apiFlags; return 0; } @@ -652,7 +652,7 @@ qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJob job = priv->job.active; - priv->job.cb->decreaseJobsQueued(obj); + priv->job.cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), @@ -673,7 +673,7 @@ qemuDomainObjEndAgentJob(virDomainObjPtr obj) qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainAgentJob agentJob = priv->job.agentActive; - 
priv->job.cb->decreaseJobsQueued(obj); + priv->job.cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)", qemuDomainAgentJobTypeToString(agentJob), @@ -691,7 +691,7 @@ qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; - priv->job.cb->decreaseJobsQueued(obj); + priv->job.cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)", qemuDomainAsyncJobTypeToString(priv->job.asyncJob), @@ -744,7 +744,7 @@ qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE) virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags); - if (priv->job.cb->formatJob(&childBuf, &priv->job, vm) < 0) + if (priv->job.cb->jobcb->formatJob(&childBuf, &priv->job, vm) < 0) return -1; virXMLFormatElement(buf, "job", &attrBuf, &childBuf); @@ -804,7 +804,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, return -1; } - if (priv->job.cb->parseJob(ctxt, job, vm) < 0) + if (priv->job.cb->jobcb->parseJob(ctxt, job, vm) < 0) return -1; return 0; diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index f3d9218ec0..211503b439 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -118,20 +118,27 @@ typedef void (*qemuDomainObjIncreaseJobsQueued)(virDomainObjPtr); typedef void (*qemuDomainObjDecreaseJobsQueued)(virDomainObjPtr); typedef int (*qemuDomainObjGetMaxQueuedJobs)(virDomainObjPtr); -typedef struct _qemuDomainObjPrivateJobCallbacks qemuDomainObjPrivateJobCallbacks; -typedef qemuDomainObjPrivateJobCallbacks *qemuDomainObjPrivateJobCallbacksPtr; -struct _qemuDomainObjPrivateJobCallbacks { - qemuDomainObjPrivateJobAlloc allocJobPrivate; - qemuDomainObjPrivateJobFree freeJobPrivate; - qemuDomainObjPrivateJobReset resetJobPrivate; - qemuDomainObjPrivateJobFormat formatJob; - qemuDomainObjPrivateJobParse parseJob; - qemuDomainObjJobInfoSetOperation setJobInfoOperation; - 
qemuDomainObjCurrentJobInfoInit currentJobInfoInit; - qemuDomainObjGetJobsQueued getJobsQueued; - qemuDomainObjIncreaseJobsQueued increaseJobsQueued; - qemuDomainObjDecreaseJobsQueued decreaseJobsQueued; - qemuDomainObjGetMaxQueuedJobs getMaxQueuedJobs; +typedef struct _qemuDomainJobPrivateJobCallbacks qemuDomainJobPrivateJobCallbacks; +typedef qemuDomainJobPrivateJobCallbacks *qemuDomainJobPrivateJobCallbacksPtr; +struct _qemuDomainJobPrivateJobCallbacks { + qemuDomainObjPrivateJobAlloc allocJobPrivate; + qemuDomainObjPrivateJobFree freeJobPrivate; + qemuDomainObjPrivateJobReset resetJobPrivate; + qemuDomainObjPrivateJobFormat formatJob; + qemuDomainObjPrivateJobParse parseJob; + qemuDomainObjJobInfoSetOperation setJobInfoOperation; + qemuDomainObjCurrentJobInfoInit currentJobInfoInit; + qemuDomainObjGetJobsQueued getJobsQueued; + qemuDomainObjIncreaseJobsQueued increaseJobsQueued; + qemuDomainObjDecreaseJobsQueued decreaseJobsQueued; + qemuDomainObjGetMaxQueuedJobs getMaxQueuedJobs; +}; + +typedef struct _qemuDomainJobPrivateCallbacks qemuDomainJobPrivateCallbacks; +typedef qemuDomainJobPrivateCallbacks *qemuDomainJobPrivateCallbacksPtr; +struct _qemuDomainJobPrivateCallbacks { + /* Job related callbacks */ + qemuDomainJobPrivateJobCallbacksPtr jobcb; }; struct _qemuDomainJobObj { @@ -162,7 +169,7 @@ struct _qemuDomainJobObj { unsigned long apiFlags; /* flags passed to the API which started the async job */ void *privateData; /* job specific collection of data */ - qemuDomainObjPrivateJobCallbacksPtr cb; + qemuDomainJobPrivateCallbacksPtr cb; }; const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, @@ -216,7 +223,7 @@ void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); int qemuDomainObjInitJob(qemuDomainJobObjPtr job, - qemuDomainObjPrivateJobCallbacksPtr cb); + qemuDomainJobPrivateCallbacksPtr cb); bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob); -- 2.25.1

The function `qemuDomainObjSaveStatus` required access to `virQEMUDriverPtr`. To make jobs hypervisor-agnostic, we remove this function and replace it with a callback function from `qemuDomainJob`. Removing `virQEMUDriverPtr` as a parameter also resulted in removing it from the functions where it was passed. All such references were removed as the variable was no longer required.
(qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) return -1; rc = qemuBlockStorageSourceAttachApply(priv->mon, dd->crdata->srcdata[0]); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; } @@ -572,7 +572,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, virDomainBackupDefFree(priv->backup); priv->backup = NULL; - qemuDomainObjEndAsyncJob(priv->driver, vm); + qemuDomainObjEndAsyncJob(vm); } @@ -625,12 +625,12 @@ qemuBackupJobCancelBlockjobs(virDomainObjPtr vm, if (backupdisk->state != VIR_DOMAIN_BACKUP_DISK_STATE_RUNNING) continue; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; rc = qemuMonitorJobCancel(priv->mon, job->name, false); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; if (rc == 0) { @@ -740,7 +740,7 @@ qemuBackupBegin(virDomainObjPtr vm, * infrastructure for async jobs. 
We'll allow standard modify-type jobs * as the interlocking of conflicting operations is handled on the block * job level */ - if (qemuDomainObjBeginAsyncJob(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_BACKUP, VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0) return -1; @@ -804,7 +804,7 @@ qemuBackupBegin(virDomainObjPtr vm, priv->backup = g_steal_pointer(&def); - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) goto endjob; /* TODO: TLS is a must-have for the modern age */ @@ -824,7 +824,7 @@ qemuBackupBegin(virDomainObjPtr vm, if (rc == 0) rc = qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; job_started = true; @@ -837,12 +837,12 @@ qemuBackupBegin(virDomainObjPtr vm, } if (pull) { - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) goto endjob; /* note that if the export fails we've already created the checkpoint * and we will not delete it */ rc = qemuBackupBeginPullExportDisks(vm, dd, ndd); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) { @@ -863,14 +863,14 @@ qemuBackupBegin(virDomainObjPtr vm, qemuCheckpointRollbackMetadata(vm, chk); if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) && - qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) == 0) { + qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) == 0) { if (nbd_running) ignore_value(qemuMonitorNBDServerStop(priv->mon)); if (tlsAlias) ignore_value(qemuMonitorDelObject(priv->mon, tlsAlias, false)); if (tlsSecretAlias) ignore_value(qemuMonitorDelObject(priv->mon, tlsSecretAlias, false)); - 
ignore_value(qemuDomainObjExitMonitor(priv->driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } if (ret < 0 && !job_started && priv->backup) @@ -879,7 +879,7 @@ qemuBackupBegin(virDomainObjPtr vm, if (ret == 0) qemuDomainObjReleaseAsyncJob(vm); else - qemuDomainObjEndAsyncJob(priv->driver, vm); + qemuDomainObjEndAsyncJob(vm); return ret; } @@ -929,14 +929,14 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm, return; if (backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL) { - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; ignore_value(qemuMonitorNBDServerStop(priv->mon)); if (backup->tlsAlias) ignore_value(qemuMonitorDelObject(priv->mon, backup->tlsAlias, false)); if (backup->tlsSecretAlias) ignore_value(qemuMonitorDelObject(priv->mon, backup->tlsSecretAlias, false)); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; /* update the final statistics with the current job's data */ @@ -1067,8 +1067,7 @@ qemuBackupGetJobInfoStatsUpdateOne(virDomainObjPtr vm, int -qemuBackupGetJobInfoStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBackupGetJobInfoStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo) { qemuDomainBackupStats *stats = &jobInfo->stats.backup; @@ -1090,11 +1089,11 @@ qemuBackupGetJobInfoStats(virQEMUDriverPtr driver, jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorGetJobInfo(priv->mon, &blockjobs, &nblockjobs); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; /* count in completed jobs */ diff --git a/src/qemu/qemu_backup.h b/src/qemu/qemu_backup.h index 075fde709b..9925fddbf9 100644 --- a/src/qemu/qemu_backup.h +++ b/src/qemu/qemu_backup.h @@ -48,8 +48,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, qemuDomainJobStatus jobstatus); int 
-qemuBackupGetJobInfoStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBackupGetJobInfoStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo); /* exported for testing */ diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c index 26c1b42428..23b60e73ec 100644 --- a/src/qemu/qemu_block.c +++ b/src/qemu/qemu_block.c @@ -320,8 +320,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk, int -qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockNodeNamesDetect(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -334,13 +333,13 @@ qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_NAMED_BLOCK_NODES)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; data = qemuMonitorQueryNamedBlockNodes(qemuDomainGetMonitor(vm)); blockstats = qemuMonitorQueryBlockstats(qemuDomainGetMonitor(vm)); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !data || !blockstats) + if (qemuDomainObjExitMonitor(vm) < 0 || !data || !blockstats) return -1; if (!(disktable = qemuBlockNodeNameGetBackingChain(data, blockstats))) @@ -1976,7 +1975,6 @@ qemuBlockStorageSourceChainDetach(qemuMonitorPtr mon, /** * qemuBlockStorageSourceDetachOneBlockdev: - * @driver: qemu driver object * @vm: domain object * @asyncJob: currently running async job * @src: storage source to detach @@ -1986,14 +1984,13 @@ qemuBlockStorageSourceChainDetach(qemuMonitorPtr mon, * monitor internally. 
*/ int -qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virStorageSourcePtr src) { int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorBlockdevDel(qemuDomainGetMonitor(vm), src->nodeformat); @@ -2001,7 +1998,7 @@ qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriverPtr driver, if (ret == 0) ret = qemuMonitorBlockdevDel(qemuDomainGetMonitor(vm), src->nodestorage); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; @@ -2561,13 +2558,13 @@ qemuBlockStorageSourceCreateGeneric(virDomainObjPtr vm, qemuBlockJobSyncBegin(job); - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorBlockdevCreate(priv->mon, job->name, props); props = NULL; - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; qemuBlockJobStarted(job, vm); @@ -2708,18 +2705,18 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, false, true) < 0) return -1; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuBlockStorageSourceAttachApplyStorageDeps(priv->mon, data); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; if (qemuBlockStorageSourceCreateStorage(vm, src, chain, asyncJob) < 0) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuBlockStorageSourceAttachApplyStorage(priv->mon, data); @@ -2727,7 +2724,7 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, if (rc == 
0) rc = qemuBlockStorageSourceAttachApplyFormatDeps(priv->mon, data); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; if (qemuBlockStorageSourceCreateFormat(vm, src, backingStore, chain, @@ -2740,12 +2737,12 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, false, true) < 0) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuBlockStorageSourceAttachApplyFormat(priv->mon, data); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; ret = 0; @@ -2753,10 +2750,10 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, cleanup: if (ret < 0 && virDomainObjIsActive(vm) && - qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) == 0) { + qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuBlockStorageSourceAttachRollback(priv->mon, data); - ignore_value(qemuDomainObjExitMonitor(priv->driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } return ret; @@ -2861,17 +2858,16 @@ qemuBlockGetNamedNodeData(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; g_autoptr(virHashTable) blockNamedNodeData = NULL; bool supports_flat = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QMP_QUERY_NAMED_BLOCK_NODES_FLAT); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return NULL; blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon, supports_flat); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData) + if (qemuDomainObjExitMonitor(vm) < 0 || !blockNamedNodeData) return NULL; return g_steal_pointer(&blockNamedNodeData); @@ -3185,7 +3181,6 @@ qemuBlockReopenFormat(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr 
priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; g_autoptr(virJSONValue) reopenprops = NULL; int rc; @@ -3200,12 +3195,12 @@ qemuBlockReopenFormat(virDomainObjPtr vm, if (!(reopenprops = qemuBlockStorageSourceGetBlockdevProps(src, src->backingStore))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorBlockdevReopen(priv->mon, &reopenprops); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; return 0; diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h index 9aab620947..35148ea2ba 100644 --- a/src/qemu/qemu_block.h +++ b/src/qemu/qemu_block.h @@ -46,8 +46,7 @@ qemuBlockNodeNameGetBackingChain(virJSONValuePtr namednodesdata, virJSONValuePtr blockstats); int -qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockNodeNamesDetect(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); virHashTablePtr @@ -140,8 +139,7 @@ qemuBlockStorageSourceAttachRollback(qemuMonitorPtr mon, qemuBlockStorageSourceAttachDataPtr data); int -qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virStorageSourcePtr src); diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c index c49c98e547..265f449b7a 100644 --- a/src/qemu/qemu_blockjob.c +++ b/src/qemu/qemu_blockjob.c @@ -491,8 +491,7 @@ qemuBlockJobRefreshJobsFindInactive(const void *payload, int -qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuBlockJobRefreshJobs(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorJobInfoPtr *jobinfo = NULL; @@ -503,11 +502,11 @@ qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, int ret = -1; int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = 
qemuMonitorGetJobInfo(priv->mon, &jobinfo, &njobinfo); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; for (i = 0; i < njobinfo; i++) { @@ -524,13 +523,13 @@ qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, qemuBlockJobMarkBroken(job); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorJobCancel(priv->mon, job->name, true); if (rc == -1 && jobinfo[i]->status == QEMU_MONITOR_JOB_STATUS_CONCLUDED) VIR_WARN("can't cancel job '%s' with invalid data", job->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) @@ -757,7 +756,7 @@ qemuBlockJobEventProcessLegacyCompleted(virQEMUDriverPtr driver, disk->src->id = 0; virStorageSourceBackingStoreClear(disk->src); ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, NULL, true)); - ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob)); + ignore_value(qemuBlockNodeNamesDetect(vm, asyncJob)); qemuBlockJobUnregister(job, vm); qemuDomainSaveConfig(vm); } @@ -843,11 +842,11 @@ qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriverPtr driver, if (!(data = qemuBlockStorageSourceChainDetachPrepareBlockdev(chain))) return; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; qemuBlockStorageSourceChainDetach(qemuDomainGetMonitor(vm), data); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; qemuDomainStorageSourceChainAccessRevoke(driver, vm, chain); @@ -959,12 +958,12 @@ qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObjPtr vm, if (!actions) return 0; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) 
< 0) return -1; return 0; @@ -1123,12 +1122,12 @@ qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObjPtr vm, return -1; } - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (!active) { @@ -1346,12 +1345,12 @@ qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObjPtr vm, if (!actions) return 0; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return 0; @@ -1431,12 +1430,12 @@ qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriverPtr driver, ignore_value(qemuMonitorTransactionBitmapRemove(actions, disk->mirror->nodeformat, "libvirt-tmp-activewrite")); - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; /* Ideally, we would make the backing chain read only again (yes, SELinux @@ -1480,12 +1479,12 @@ qemuBlockJobProcessEventConcludedCreate(virQEMUDriverPtr driver, VIR_FREE(backend->encryptsecretAlias); } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.create.src); @@ -1520,7 +1519,7 @@ qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver, return; } - 
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; if (backend) @@ -1529,7 +1528,7 @@ qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver, if (actions) qemuMonitorTransaction(qemuDomainGetMonitor(vm), &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; if (job->data.backup.store) @@ -1610,7 +1609,7 @@ qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job, unsigned long long progressCurrent = 0; unsigned long long progressTotal = 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; /* we need to fetch the error state as the event does not propagate it */ @@ -1643,7 +1642,7 @@ qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job, /* dismiss job in qemu */ ignore_value(qemuMonitorJobDismiss(qemuDomainGetMonitor(vm), job->name)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if ((job->newstate == QEMU_BLOCKJOB_STATE_COMPLETED || diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h index 9f73a3547c..bdf4787eb0 100644 --- a/src/qemu/qemu_blockjob.h +++ b/src/qemu/qemu_blockjob.h @@ -226,8 +226,7 @@ qemuBlockJobStartupFinalize(virDomainObjPtr vm, qemuBlockJobDataPtr job); int -qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, - virDomainObjPtr vm); +qemuBlockJobRefreshJobs(virDomainObjPtr vm); void qemuBlockJobUpdate(virDomainObjPtr vm, diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c index f45ab29d4c..b90410aa20 100644 --- a/src/qemu/qemu_checkpoint.c +++ b/src/qemu/qemu_checkpoint.c @@ -198,9 +198,9 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm, relabelimages = g_slist_prepend(relabelimages, src); } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorTransaction(priv->mon, &actions); - if 
(qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; relabel: @@ -457,9 +457,9 @@ qemuCheckpointCreate(virQEMUDriverPtr driver, if (qemuCheckpointCreateCommon(driver, vm, def, &actions, &chk) < 0) return NULL; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorTransaction(qemuDomainGetMonitor(vm), &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) { + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) { qemuCheckpointRollbackMetadata(vm, chk); return NULL; } @@ -539,7 +539,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, /* Unlike snapshots, the RNG schema already ensured a sane filename. */ /* We are going to modify the domain below. */ - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return NULL; if (redefine) { @@ -561,7 +561,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, checkpoint = virGetDomainCheckpoint(domain, chk->def->name); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return checkpoint; } @@ -578,7 +578,6 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, virDomainCheckpointDefPtr chkdef) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; g_autoptr(virHashTable) blockNamedNodeData = NULL; g_autofree struct qemuCheckpointDiskMap *diskmap = NULL; g_autoptr(virJSONValue) recoveractions = NULL; @@ -589,7 +588,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -659,7 +658,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (rc == 0 && recoveractions) rc = qemuMonitorTransaction(priv->mon, &recoveractions); @@ -667,7 +666,7 @@ 
qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, if (rc == 0) rc = qemuMonitorTransaction(priv->mon, &mergeactions); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; /* now do a final refresh */ @@ -675,11 +674,11 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE))) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorTransaction(priv->mon, &cleanupactions); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; /* update disks */ @@ -698,7 +697,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -782,7 +781,7 @@ qemuCheckpointDelete(virDomainObjPtr vm, VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY | VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (!metadata_only) { @@ -850,6 +849,6 @@ qemuCheckpointDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 82638cfcd9..7f56720011 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -821,6 +821,7 @@ static qemuDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = { }; static qemuDomainJobPrivateCallbacks qemuJobPrivateCallbacks = { + .saveStatus = qemuDomainSaveStatus, .jobcb = &qemuJobPrivateJobCallbacks, }; @@ -6089,20 +6090,19 @@ qemuDomainSaveConfig(virDomainObjPtr obj) * To be followed with qemuDomainObjExitMonitor() once complete */ static int -qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver, - virDomainObjPtr obj, 
+qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = obj->privateData; if (asyncJob != QEMU_ASYNC_JOB_NONE) { int ret; - if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0) + if ((ret = qemuDomainObjBeginNestedJob(obj, asyncJob)) < 0) return ret; if (!virDomainObjIsActive(obj)) { virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("domain is no longer running")); - qemuDomainObjEndJob(driver, obj); + qemuDomainObjEndJob(obj); return -1; } } else if (priv->job.asyncOwner == virThreadSelfID()) { @@ -6126,8 +6126,7 @@ qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver, } static void ATTRIBUTE_NONNULL(1) -qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver, - virDomainObjPtr obj) +qemuDomainObjExitMonitorInternal(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; bool hasRefs; @@ -6148,14 +6147,12 @@ qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver, priv->mon = NULL; if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjEndJob(driver, obj); + qemuDomainObjEndJob(obj); } -void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +void qemuDomainObjEnterMonitor(virDomainObjPtr obj) { - ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj, - QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuDomainObjEnterMonitorInternal(obj, QEMU_ASYNC_JOB_NONE)); } /* obj must NOT be locked before calling @@ -6168,10 +6165,9 @@ void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver, * and replaced by the persistent definition, so pointers stolen * from the live definition could no longer be valid. 
*/ -int qemuDomainObjExitMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +int qemuDomainObjExitMonitor(virDomainObjPtr obj) { - qemuDomainObjExitMonitorInternal(driver, obj); + qemuDomainObjExitMonitorInternal(obj); if (!virDomainObjIsActive(obj)) { if (virGetLastErrorCode() == VIR_ERR_OK) virReportError(VIR_ERR_OPERATION_FAILED, "%s", @@ -6196,11 +6192,10 @@ int qemuDomainObjExitMonitor(virQEMUDriverPtr driver, * in the meantime). */ int -qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) { - return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob); + return qemuDomainObjEnterMonitorInternal(obj, asyncJob); } @@ -7134,10 +7129,10 @@ qemuDomainSnapshotDiscard(virQEMUDriverPtr driver, return -1; } else { priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); /* we continue on even in the face of error */ qemuMonitorDeleteSnapshot(priv->mon, snap->def->name); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } @@ -7297,12 +7292,12 @@ qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, { bool haveJob; - haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactive(driver, vm); if (haveJob) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -7318,12 +7313,12 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, { bool haveJob; - haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactiveLocked(driver, vm); if (haveJob) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -8338,18 +8333,17 @@ qemuDomainHasBlockjob(virDomainObjPtr vm, int -qemuDomainUpdateDeviceList(virQEMUDriverPtr driver, - virDomainObjPtr vm, 
+qemuDomainUpdateDeviceList(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; char **aliases; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetDeviceAliases(priv->mon, &aliases); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc < 0) return -1; @@ -8361,8 +8355,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver, int -qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainUpdateMemoryDeviceInfo(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -8373,12 +8366,12 @@ qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver, if (vm->def->nmems == 0) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetMemoryDeviceInfo(priv->mon, &meminfo); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { virHashFree(meminfo); return -1; } @@ -9599,7 +9592,6 @@ qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm) /** * qemuDomainRefreshVcpuInfo: - * @driver: qemu driver data * @vm: domain object * @asyncJob: current asynchronous job type * @state: refresh vcpu state @@ -9612,8 +9604,7 @@ qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm) * Returns 0 on success and -1 on fatal error. 
*/ int -qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRefreshVcpuInfo(virDomainObjPtr vm, int asyncJob, bool state) { @@ -9634,13 +9625,13 @@ qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver, VIR_DEBUG("Maxvcpus %zu hotplug %d fast query %d", maxvcpus, hotplug, fast); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetCPUInfo(qemuDomainGetMonitor(vm), &info, maxvcpus, hotplug, fast); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) @@ -9752,7 +9743,6 @@ qemuDomainGetVcpuHalted(virDomainObjPtr vm, /** * qemuDomainRefreshVcpuHalted: - * @driver: qemu driver data * @vm: domain object * @asyncJob: current asynchronous job type * @@ -9761,8 +9751,7 @@ qemuDomainGetVcpuHalted(virDomainObjPtr vm, * Returns 0 on success and -1 on error */ int -qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRefreshVcpuHalted(virDomainObjPtr vm, int asyncJob) { virDomainVcpuDefPtr vcpu; @@ -9787,14 +9776,14 @@ qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver, QEMU_CAPS_QUERY_CPUS_FAST)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; fast = virQEMUCapsGet(QEMU_DOMAIN_PRIVATE(vm)->qemuCaps, QEMU_CAPS_QUERY_CPUS_FAST); haltedmap = qemuMonitorGetCpuHalted(qemuDomainGetMonitor(vm), maxvcpus, fast); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !haltedmap) + if (qemuDomainObjExitMonitor(vm) < 0 || !haltedmap) goto cleanup; for (i = 0; i < maxvcpus; i++) { @@ -10218,19 +10207,18 @@ qemuDomainVcpuPersistOrder(virDomainDefPtr def) int -qemuDomainCheckMonitor(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainCheckMonitor(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; - if 
(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorCheck(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index f77a377e30..43fb37e786 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -578,15 +578,12 @@ void qemuDomainEventFlush(int timer, void *opaque); qemuMonitorPtr qemuDomainGetMonitor(virDomainObjPtr vm) ATTRIBUTE_NONNULL(1); -void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +void qemuDomainObjEnterMonitor(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); -int qemuDomainObjExitMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +int qemuDomainObjExitMonitor(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; @@ -802,11 +799,9 @@ extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig; extern virDomainABIStability virQEMUDriverDomainABIStability; extern virSaveCookieCallbacks virQEMUDriverDomainSaveCookie; -int qemuDomainUpdateDeviceList(virQEMUDriverPtr driver, - virDomainObjPtr vm, int asyncJob); +int qemuDomainUpdateDeviceList(virDomainObjPtr vm, int asyncJob); -int qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainUpdateMemoryDeviceInfo(virDomainObjPtr vm, int asyncJob); bool qemuDomainDefCheckABIStability(virQEMUDriverPtr driver, @@ -872,13 +867,11 @@ bool qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm); bool qemuDomainHasVcpuPids(virDomainObjPtr vm); pid_t qemuDomainGetVcpuPid(virDomainObjPtr vm, unsigned int vcpuid); int 
qemuDomainValidateVcpuInfo(virDomainObjPtr vm); -int qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainRefreshVcpuInfo(virDomainObjPtr vm, int asyncJob, bool state); bool qemuDomainGetVcpuHalted(virDomainObjPtr vm, unsigned int vcpu); -int qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainRefreshVcpuHalted(virDomainObjPtr vm, int asyncJob); bool qemuDomainSupportsNicdev(virDomainDefPtr def, @@ -973,8 +966,7 @@ bool qemuDomainVcpuHotplugIsInOrder(virDomainDefPtr def) void qemuDomainVcpuPersistOrder(virDomainDefPtr def) ATTRIBUTE_NONNULL(1); -int qemuDomainCheckMonitor(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainCheckMonitor(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); bool qemuDomainSupportsVideoVga(virDomainVideoDefPtr video, diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 6405a6ffc1..bb260ccb2e 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -217,8 +217,7 @@ qemuDomainTrackJob(qemuDomainJob job) void -qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjSetJobPhase(virDomainObjPtr obj, int phase) { qemuDomainObjPrivatePtr priv = obj->privateData; @@ -239,7 +238,7 @@ qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, priv->job.phase = phase; priv->job.asyncOwner = me; - qemuDomainObjSaveStatus(driver, obj); + priv->job.cb->saveStatus(obj); } void @@ -255,14 +254,13 @@ qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, } void -qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) +qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; - if (priv->job.active == QEMU_JOB_ASYNC_NESTED) qemuDomainObjResetJob(&priv->job); qemuDomainObjResetAsyncJob(&priv->job); - qemuDomainObjSaveStatus(driver, obj); + priv->job.cb->saveStatus(obj); } void @@ -311,7 +309,6 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, /** * 
qemuDomainObjBeginJobInternal: - * @driver: qemu driver * @obj: domain object * @job: qemuDomainJob to start * @asyncJob: qemuDomainAsyncJob to start @@ -332,8 +329,8 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, * -1 otherwise. */ static int ATTRIBUTE_NONNULL(1) -qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginJobInternal(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job, qemuDomainAgentJob agentJob, qemuDomainAsyncJob asyncJob, @@ -439,7 +436,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, } if (qemuDomainTrackJob(job)) - qemuDomainObjSaveStatus(driver, obj); + jobObj->cb->saveStatus(obj); return 0; @@ -543,11 +540,13 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, * * Successful calls must be followed by EndJob eventually */ -int qemuDomainObjBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginJob(virDomainObjPtr obj, qemuDomainJob job) { - if (qemuDomainObjBeginJobInternal(driver, obj, job, + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + + if (qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, false) < 0) return -1; @@ -563,37 +562,36 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver, * To end job call qemuDomainObjEndAgentJob. 
*/ int -qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginAgentJob(virDomainObjPtr obj, qemuDomainAgentJob agentJob) { - return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE, + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + + return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_NONE, agentJob, QEMU_ASYNC_JOB_NONE, false); } -int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) { - qemuDomainObjPrivatePtr priv; + qemuDomainObjPrivatePtr priv = obj->privateData; - if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC, + if (qemuDomainObjBeginJobInternal(obj, &priv->job, QEMU_JOB_ASYNC, QEMU_AGENT_JOB_NONE, asyncJob, false) < 0) return -1; - priv = obj->privateData; priv->job.cb->jobcb->setJobInfoOperation(&priv->job, operation); priv->job.apiFlags = apiFlags; return 0; } int -qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginNestedJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = obj->privateData; @@ -610,7 +608,7 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, priv->job.asyncOwner); } - return qemuDomainObjBeginJobInternal(driver, obj, + return qemuDomainObjBeginJobInternal(obj, &priv->job, QEMU_JOB_ASYNC_NESTED, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, @@ -620,7 +618,6 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, /** * qemuDomainObjBeginJobNowait: * - * @driver: qemu driver * @obj: domain object * @job: qemuDomainJob to start * @@ -631,11 +628,13 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, * Returns: see qemuDomainObjBeginJobInternal */ int -qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginJobNowait(virDomainObjPtr obj, 
qemuDomainJob job) { - return qemuDomainObjBeginJobInternal(driver, obj, job, + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + + return qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, true); } @@ -647,7 +646,7 @@ qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, * earlier qemuDomainBeginJob() call */ void -qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) +qemuDomainObjEndJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJob job = priv->job.active; @@ -661,7 +660,7 @@ qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) qemuDomainObjResetJob(&priv->job); if (qemuDomainTrackJob(job)) - qemuDomainObjSaveStatus(driver, obj); + priv->job.cb->saveStatus(obj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. */ virCondBroadcast(&priv->job.cond); @@ -687,7 +686,7 @@ qemuDomainObjEndAgentJob(virDomainObjPtr obj) } void -qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) +qemuDomainObjEndAsyncJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; @@ -698,7 +697,7 @@ qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - qemuDomainObjSaveStatus(driver, obj); + priv->job.cb->saveStatus(obj); virCondBroadcast(&priv->job.asyncCond); } diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 211503b439..32ff01009d 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -104,6 +104,7 @@ typedef qemuDomainJobObj *qemuDomainJobObjPtr; typedef void *(*qemuDomainObjPrivateJobAlloc)(void); typedef void (*qemuDomainObjPrivateJobFree)(void *); typedef void (*qemuDomainObjPrivateJobReset)(void *); +typedef void (*qemuDomainObjPrivateSaveStatus)(virDomainObjPtr); typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, 
qemuDomainJobObjPtr, virDomainObjPtr); @@ -137,6 +138,9 @@ struct _qemuDomainJobPrivateJobCallbacks { typedef struct _qemuDomainJobPrivateCallbacks qemuDomainJobPrivateCallbacks; typedef qemuDomainJobPrivateCallbacks *qemuDomainJobPrivateCallbacksPtr; struct _qemuDomainJobPrivateCallbacks { + /* generic callbacks that we can't really categorize */ + qemuDomainObjPrivateSaveStatus saveStatus; + /* Job related callbacks */ qemuDomainJobPrivateJobCallbacksPtr jobcb; }; @@ -177,44 +181,35 @@ const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, const char *phase); -int qemuDomainObjBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginJob(virDomainObjPtr obj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, qemuDomainAgentJob agentJob) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; -void qemuDomainObjEndJob(virQEMUDriverPtr driver, - virDomainObjPtr obj); +void qemuDomainObjEndJob(virDomainObjPtr obj); void qemuDomainObjEndAgentJob(virDomainObjPtr obj); -void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj); +void qemuDomainObjEndAsyncJob(virDomainObjPtr obj); void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj); -void 
qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, - virDomainObjPtr obj, +void qemuDomainObjSetJobPhase(virDomainObjPtr obj, int phase); void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, unsigned long long allowedJobs); int qemuDomainObjRestoreJob(virDomainObjPtr obj, qemuDomainJobObjPtr job); -void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj); +void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj); void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj); bool qemuDomainTrackJob(qemuDomainJob job); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 806577a70a..974f71b22f 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -191,8 +191,7 @@ qemuAutostartDomain(virDomainObjPtr vm, virResetLastError(); if (vm->autostart && !virDomainObjIsActive(vm)) { - if (qemuProcessBeginJob(driver, vm, - VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) { + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Failed to start job on VM '%s': %s"), vm->def->name, virGetLastErrorMessage()); @@ -206,7 +205,7 @@ qemuAutostartDomain(virDomainObjPtr vm, vm->def->name, virGetLastErrorMessage()); } - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); } ret = 0; @@ -1748,7 +1747,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, goto cleanup; def = NULL; - if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) { qemuDomainRemoveInactiveJob(driver, vm); goto cleanup; @@ -1760,7 +1759,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, start_flags) < 0) { virDomainAuditStart(vm, "booted", false); qemuDomainRemoveInactive(driver, vm); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); goto cleanup; } @@ -1781,7 +1780,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, dom = virGetDomain(conn, vm->def->name, vm->def->uuid, vm->def->id); 
- qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: virDomainDefFree(def); @@ -1812,7 +1811,7 @@ static int qemuDomainSuspend(virDomainPtr dom) cfg = virQEMUDriverGetConfig(driver); priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_SUSPEND) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_SUSPEND) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1839,7 +1838,7 @@ static int qemuDomainSuspend(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -1865,7 +1864,7 @@ static int qemuDomainResume(virDomainPtr dom) if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1897,7 +1896,7 @@ static int qemuDomainResume(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -1916,8 +1915,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, int agentFlag = isReboot ? 
QEMU_AGENT_SHUTDOWN_REBOOT : QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1950,7 +1948,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1960,13 +1958,13 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, } qemuDomainSetFakeReboot(driver, vm, isReboot); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemPowerdown(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -2049,8 +2047,7 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, if (!isReboot) agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (!qemuDomainAgentAvailable(vm, agentForced)) @@ -2078,21 +2075,20 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, - QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) goto endjob; qemuDomainSetFakeReboot(driver, vm, isReboot); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemPowerdown(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -2155,7 +2151,6 @@ 
qemuDomainReboot(virDomainPtr dom, unsigned int flags) static int qemuDomainReset(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -2169,16 +2164,16 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) if (virDomainResetEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemReset(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; priv->fakeReboot = false; @@ -2188,7 +2183,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_CRASHED); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2228,7 +2223,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, reason == VIR_DOMAIN_PAUSED_STARTING_UP && !priv->beingDestroyed); - if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, + if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, !(flags & VIR_DOMAIN_DESTROY_GRACEFUL)) < 0) goto cleanup; @@ -2259,7 +2254,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, endjob: if (ret == 0) qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2333,7 +2328,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2396,9 +2391,9 @@ static 
int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, if (def) { priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); r = qemuMonitorSetBalloon(priv->mon, newmem); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || r < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || r < 0) goto endjob; /* Lack of balloon support is a fatal error */ @@ -2420,7 +2415,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2459,7 +2454,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2476,9 +2471,9 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); r = qemuMonitorSetMemoryStatsPeriod(priv->mon, def->memballoon, period); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (r < 0) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", @@ -2505,7 +2500,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2514,7 +2509,6 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) { - virQEMUDriverPtr driver = domain->conn->privateData; virDomainObjPtr vm = NULL; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -2529,19 +2523,19 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) priv = 
vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorInjectNMI(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2555,7 +2549,6 @@ static int qemuDomainSendKey(virDomainPtr domain, int nkeycodes, unsigned int flags) { - virQEMUDriverPtr driver = domain->conn->privateData; virDomainObjPtr vm = NULL; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -2589,19 +2582,19 @@ static int qemuDomainSendKey(virDomainPtr domain, if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSendKey(priv->mon, holdtime, keycodes, nkeycodes); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2771,7 +2764,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SAVE, VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0) goto cleanup; @@ -2865,7 +2858,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virErrorRestore(&save_err); } } - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); if (ret == 0) qemuDomainRemoveInactiveJob(driver, vm); @@ -3130,7 +3123,7 @@ 
qemuDumpToFd(virQEMUDriverPtr driver, else g_clear_pointer(&jobPriv->current, qemuDomainJobInfoFree); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (dumpformat) { @@ -3141,14 +3134,14 @@ qemuDumpToFd(virQEMUDriverPtr driver, _("unsupported dumpformat '%s' " "for this QEMU binary"), dumpformat); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); return -1; } } ret = qemuMonitorDumpToFd(priv->mon, fd, dumpformat, detach); - if ((qemuDomainObjExitMonitor(driver, vm) < 0) || ret < 0) + if ((qemuDomainObjExitMonitor(vm) < 0) || ret < 0) return -1; if (detach) @@ -3281,8 +3274,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (virDomainCoreDumpWithFormatEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAsyncJob(driver, vm, - QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) goto cleanup; @@ -3329,9 +3321,9 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } else if (((resume && paused) || (flags & VIR_DUMP_RESET)) && virDomainObjIsActive(vm)) { if ((ret == 0) && (flags & VIR_DUMP_RESET)) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemReset(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; } @@ -3349,7 +3341,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } } - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); if (ret == 0 && flags & VIR_DUMP_CRASH) qemuDomainRemoveInactiveJob(driver, vm); @@ -3399,7 +3391,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -3447,12 +3439,12 @@ 
qemuDomainScreenshot(virDomainPtr dom, qemuSecurityDomainSetPathLabel(driver, vm, tmp, false); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorScreendump(priv->mon, videoAlias, screen, tmp) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (VIR_CLOSE(tmp_fd) < 0) { @@ -3473,7 +3465,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (unlink_tmp) unlink(tmp); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -3520,8 +3512,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, switch (action) { case VIR_DOMAIN_WATCHDOG_ACTION_DUMP: - if (qemuDomainObjBeginAsyncJob(driver, vm, - QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) { return; @@ -3549,7 +3540,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); } static int @@ -3598,7 +3589,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, bool removeInactive = false; unsigned long flags = VIR_DUMP_MEMORY_ONLY; - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) return; @@ -3664,7 +3655,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); if (removeInactive) qemuDomainRemoveInactiveJob(driver, vm); } @@ -3681,7 +3672,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, VIR_DEBUG("Removing device %s from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -3690,7 +3681,7 @@ 
processDeviceDeletedEvent(virQEMUDriverPtr driver, } if (STRPREFIX(devAlias, "vcpu")) { - qemuDomainRemoveVcpuAlias(driver, vm, devAlias); + qemuDomainRemoveVcpuAlias(vm, devAlias); } else { if (virDomainDefFindDevice(vm->def, devAlias, &dev, true) < 0) goto endjob; @@ -3704,7 +3695,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, devAlias); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -3905,8 +3896,7 @@ syncNicRxFilterMulticast(char *ifname, } static void -processNicRxFilterChangedEvent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +processNicRxFilterChangedEvent(virDomainObjPtr vm, const char *devAlias) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3920,7 +3910,7 @@ processNicRxFilterChangedEvent(virQEMUDriverPtr driver, "from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -3957,9 +3947,9 @@ processNicRxFilterChangedEvent(virQEMUDriverPtr driver, VIR_DEBUG("process NIC_RX_FILTER_CHANGED event for network " "device %s in domain %s", def->info.alias, vm->def->name); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorQueryRxFilter(priv->mon, devAlias, &guestFilter); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) goto endjob; @@ -4002,7 +3992,7 @@ processNicRxFilterChangedEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virNetDevRxFilterFree(hostFilter); @@ -4048,7 +4038,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, memset(&dev, 0, sizeof(dev)); } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4089,13 +4079,12 @@ processSerialChangedEvent(virQEMUDriverPtr driver, } endjob: - 
qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } static void -processBlockJobEvent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +processBlockJobEvent(virDomainObjPtr vm, const char *diskAlias, int type, int status) @@ -4103,7 +4092,7 @@ processBlockJobEvent(virQEMUDriverPtr driver, virDomainDiskDefPtr disk; g_autoptr(qemuBlockJobData) job = NULL; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4128,16 +4117,15 @@ processBlockJobEvent(virQEMUDriverPtr driver, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } static void -processJobStatusChangeEvent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +processJobStatusChangeEvent(virDomainObjPtr vm, qemuBlockJobDataPtr job) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4148,7 +4136,7 @@ processJobStatusChangeEvent(virQEMUDriverPtr driver, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -4163,7 +4151,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, unsigned int stopFlags = 0; virObjectEventPtr event = NULL; - if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, true) < 0) + if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, true) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4194,7 +4182,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, endjob: qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -4292,20 +4280,19 @@ static void qemuProcessEventHandler(void *data, void *opaque) processDeviceDeletedEvent(driver, vm, processEvent->data); break; case QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED: - processNicRxFilterChangedEvent(driver, vm, processEvent->data); + 
processNicRxFilterChangedEvent(vm, processEvent->data); break; case QEMU_PROCESS_EVENT_SERIAL_CHANGED: processSerialChangedEvent(driver, vm, processEvent->data, processEvent->action); break; case QEMU_PROCESS_EVENT_BLOCK_JOB: - processBlockJobEvent(driver, vm, - processEvent->data, + processBlockJobEvent(vm, processEvent->data, processEvent->action, processEvent->status); break; case QEMU_PROCESS_EVENT_JOB_STATUS_CHANGE: - processJobStatusChangeEvent(driver, vm, processEvent->data); + processJobStatusChangeEvent(vm, processEvent->data); break; case QEMU_PROCESS_EVENT_MONITOR_EOF: processMonitorEOFEvent(driver, vm); @@ -4449,10 +4436,10 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (useAgent) { - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; } else { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; } @@ -4471,7 +4458,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (useAgent) qemuDomainObjEndAgentJob(vm); else - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -4594,7 +4581,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -4633,7 +4620,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -4722,7 +4709,7 @@ qemuDomainPinEmulator(virDomainPtr dom, if (virDomainPinEmulatorEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if 
(virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -4789,7 +4776,7 @@ qemuDomainPinEmulator(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: if (cgroup_emulator) @@ -4884,7 +4871,6 @@ qemuDomainGetVcpus(virDomainPtr dom, static int qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; virDomainDefPtr def; int ret = -1; @@ -4908,7 +4894,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) goto cleanup; if (flags & VIR_DOMAIN_VCPU_GUEST) { - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4965,16 +4951,15 @@ qemuDomainGetMaxVcpus(virDomainPtr dom) static int -qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetIOThreadsMon(virDomainObjPtr vm, qemuMonitorIOThreadInfoPtr **iothreads) { qemuDomainObjPrivatePtr priv = vm->privateData; int niothreads = 0; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); niothreads = qemuMonitorGetIOThreads(priv->mon, iothreads); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || niothreads < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || niothreads < 0) return -1; return niothreads; @@ -4982,8 +4967,7 @@ qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver, static int -qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetIOThreadsLive(virDomainObjPtr vm, virDomainIOThreadInfoPtr **info) { qemuDomainObjPrivatePtr priv; @@ -4993,7 +4977,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -5009,7 +4993,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, goto 
endjob; } - if ((niothreads = qemuDomainGetIOThreadsMon(driver, vm, &iothreads)) < 0) + if ((niothreads = qemuDomainGetIOThreadsMon(vm, &iothreads)) < 0) goto endjob; /* Nothing to do */ @@ -5043,7 +5027,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, ret = niothreads; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: if (info_ret) { @@ -5120,7 +5104,6 @@ qemuDomainGetIOThreadInfo(virDomainPtr dom, virDomainIOThreadInfoPtr **info, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; virDomainDefPtr targetDef = NULL; int ret = -1; @@ -5138,7 +5121,7 @@ qemuDomainGetIOThreadInfo(virDomainPtr dom, goto cleanup; if (!targetDef) - ret = qemuDomainGetIOThreadsLive(driver, vm, info); + ret = qemuDomainGetIOThreadsLive(vm, info); else ret = qemuDomainGetIOThreadsConfig(targetDef, info); @@ -5182,7 +5165,7 @@ qemuDomainPinIOThread(virDomainPtr dom, if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5271,7 +5254,7 @@ qemuDomainPinIOThread(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: if (cgroup_iothread) @@ -5283,8 +5266,7 @@ qemuDomainPinIOThread(virDomainPtr dom, } static int -qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainHotplugAddIOThread(virDomainObjPtr vm, unsigned int iothread_id) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5304,7 +5286,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, if (qemuMonitorCreateObjectProps(&props, "iothread", alias, NULL) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorAddObject(priv->mon, &props, NULL) < 0) goto exit_monitor; @@ -5319,7 +5301,7 @@ 
qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, &new_iothreads)) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (new_niothreads != exp_niothreads) { @@ -5368,14 +5350,13 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, return ret; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } static int -qemuDomainHotplugModIOThread(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainHotplugModIOThread(virDomainObjPtr vm, qemuMonitorIOThreadInfo iothread) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5387,11 +5368,11 @@ qemuDomainHotplugModIOThread(virQEMUDriverPtr driver, return -1; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetIOThread(priv->mon, &iothread); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc < 0) @@ -5402,8 +5383,7 @@ qemuDomainHotplugModIOThread(virQEMUDriverPtr driver, static int -qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainHotplugDelIOThread(virDomainObjPtr vm, unsigned int iothread_id) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5419,7 +5399,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, if (!(alias = g_strdup_printf("iothread%u", iothread_id))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDelObject(priv->mon, alias, true); exp_niothreads--; @@ -5430,7 +5410,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, &new_iothreads)) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (new_niothreads != exp_niothreads) { @@ -5460,7 +5440,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, return ret; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); 
+ ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } @@ -5635,7 +5615,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5653,7 +5633,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, if (qemuDomainAddIOThreadCheck(def, iothread.iothread_id) < 0) goto endjob; - if (qemuDomainHotplugAddIOThread(driver, vm, iothread.iothread_id) < 0) + if (qemuDomainHotplugAddIOThread(vm, iothread.iothread_id) < 0) goto endjob; break; @@ -5662,7 +5642,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, if (qemuDomainDelIOThreadCheck(def, iothread.iothread_id) < 0) goto endjob; - if (qemuDomainHotplugDelIOThread(driver, vm, iothread.iothread_id) < 0) + if (qemuDomainHotplugDelIOThread(vm, iothread.iothread_id) < 0) goto endjob; break; @@ -5675,7 +5655,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, goto endjob; } - if (qemuDomainHotplugModIOThread(driver, vm, iothread) < 0) + if (qemuDomainHotplugModIOThread(vm, iothread) < 0) goto endjob; break; @@ -5722,7 +5702,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -6066,14 +6046,14 @@ qemuDomainRestoreFlags(virConnectPtr conn, priv->hookRun = true; } - if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_RESTORE, flags) < 0) goto cleanup; ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, false, QEMU_ASYNC_JOB_START); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: virDomainDefFree(def); @@ -6595,7 +6575,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags) if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuProcessBeginJob(driver, vm, 
VIR_DOMAIN_JOB_OPERATION_START, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) goto cleanup; @@ -6613,7 +6593,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags) ret = 0; endjob: - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -6743,7 +6723,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, if (virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!vm->persistent) { @@ -6839,7 +6819,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -6872,7 +6852,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, break; case VIR_DOMAIN_DEVICE_CONTROLLER: - ret = qemuDomainAttachControllerDevice(driver, vm, dev->data.controller); + ret = qemuDomainAttachControllerDevice(vm, dev->data.controller); if (!ret) { alias = dev->data.controller->info.alias; dev->data.controller = NULL; @@ -6959,7 +6939,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, break; case VIR_DOMAIN_DEVICE_INPUT: - ret = qemuDomainAttachInputDevice(driver, vm, dev->data.input); + ret = qemuDomainAttachInputDevice(vm, dev->data.input); if (ret == 0) { alias = dev->data.input->info.alias; dev->data.input = NULL; @@ -6967,7 +6947,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, break; case VIR_DOMAIN_DEVICE_VSOCK: - ret = qemuDomainAttachVsockDevice(driver, vm, dev->data.vsock); + ret = qemuDomainAttachVsockDevice(vm, dev->data.vsock); if (ret == 0) { alias = dev->data.vsock->info.alias; dev->data.vsock = NULL; @@ -7004,7 +6984,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, } if (ret == 0) - ret = qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE); + ret = qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE); return ret; } @@ -7096,7 +7076,7 @@ 
qemuDomainUpdateDeviceLive(virDomainObjPtr vm, return -1; } - ret = qemuDomainChangeNet(driver, vm, dev); + ret = qemuDomainChangeNet(vm, dev); break; case VIR_DOMAIN_DEVICE_FS: @@ -7816,7 +7796,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -7828,7 +7808,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -7873,7 +7853,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -7942,7 +7922,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainDefFree(vmdef); @@ -8012,7 +7992,7 @@ qemuDomainDetachDeviceLiveAndConfig(virQEMUDriverPtr driver, if ((rc = qemuDomainDetachDeviceLive(vm, dev_copy, driver, false)) < 0) goto cleanup; - if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; /* @@ -8095,7 +8075,7 @@ qemuDomainDetachDeviceAliasLiveAndConfig(virQEMUDriverPtr driver, if ((rc = qemuDomainDetachDeviceLive(vm, &dev, driver, true)) < 0) goto cleanup; - if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; } @@ -8128,7 +8108,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, if 
(virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8140,7 +8120,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -8163,7 +8143,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8175,7 +8155,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -8236,7 +8216,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, autostart = (autostart != 0); if (vm->autostart != autostart) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name))) @@ -8274,7 +8254,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, vm->autostart = autostart; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } ret = 0; @@ -8382,7 +8362,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -8416,7 +8396,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -8558,7 +8538,7 @@ 
qemuDomainSetMemoryParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; /* QEMU and LXC implementation are identical */ @@ -8589,7 +8569,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -8812,7 +8792,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, } } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -8867,7 +8847,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virBitmapFree(nodeset); @@ -9021,7 +9001,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9063,7 +9043,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9076,7 +9056,6 @@ qemuDomainGetPerfEvents(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuDomainObjPrivatePtr priv; virDomainDefPtr def; @@ -9096,7 +9075,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(def = virDomainObjGetOneDef(vm, flags))) @@ -9125,7 +9104,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, ret = 0; 
endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9299,7 +9278,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9533,7 +9512,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainDefFree(persistentDefCopy); @@ -9799,7 +9778,6 @@ qemuDomainBlockResize(virDomainPtr dom, unsigned long long size, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; int ret = -1; @@ -9828,7 +9806,7 @@ qemuDomainBlockResize(virDomainPtr dom, if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -9862,18 +9840,18 @@ qemuDomainBlockResize(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorBlockResize(priv->mon, device, nodename, size) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9898,7 +9876,6 @@ qemuDomainBlockStatsGatherTotals(qemuBlockStatsPtr data, /** * qemuDomainBlocksStatsGather: - * @driver: driver object * @vm: domain object * @path: to gather the statistics for * @capacity: refresh capacity of the backing image @@ -9909,8 +9886,7 @@ qemuDomainBlockStatsGatherTotals(qemuBlockStatsPtr 
data, * Returns -1 on error; number of filled block statistics on success. */ static int -qemuDomainBlocksStatsGather(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainBlocksStatsGather(virDomainObjPtr vm, const char *path, bool capacity, qemuBlockStatsPtr *retstats) @@ -9945,7 +9921,7 @@ qemuDomainBlocksStatsGather(virQEMUDriverPtr driver, } } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); nstats = qemuMonitorGetAllBlockStatsInfo(priv->mon, &blockstats, false); if (capacity && nstats >= 0) { @@ -9955,7 +9931,7 @@ qemuDomainBlocksStatsGather(virQEMUDriverPtr driver, rc = qemuMonitorBlockStatsUpdateCapacity(priv->mon, blockstats, false); } - if (qemuDomainObjExitMonitor(driver, vm) < 0 || nstats < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || nstats < 0 || rc < 0) goto cleanup; if (VIR_ALLOC(*retstats) < 0) @@ -10017,7 +9993,6 @@ qemuDomainBlockStats(virDomainPtr dom, const char *path, virDomainBlockStatsPtr stats) { - virQEMUDriverPtr driver = dom->conn->privateData; qemuBlockStatsPtr blockstats = NULL; int ret = -1; virDomainObjPtr vm; @@ -10028,13 +10003,13 @@ qemuDomainBlockStats(virDomainPtr dom, if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (qemuDomainBlocksStatsGather(driver, vm, path, false, &blockstats) < 0) + if (qemuDomainBlocksStatsGather(vm, path, false, &blockstats) < 0) goto endjob; if (VIR_ASSIGN_IS_OVERFLOW(stats->rd_req, blockstats->rd_req) || @@ -10051,7 +10026,7 @@ qemuDomainBlockStats(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -10067,7 +10042,6 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; 
qemuBlockStatsPtr blockstats = NULL; int nstats; @@ -10086,13 +10060,13 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if ((nstats = qemuDomainBlocksStatsGather(driver, vm, path, false, + if ((nstats = qemuDomainBlocksStatsGather(vm, path, false, &blockstats)) < 0) goto endjob; @@ -10139,7 +10113,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, *nparams = nstats; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(blockstats); @@ -10231,7 +10205,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10405,7 +10379,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virNetDevBandwidthFree(bandwidth); @@ -10525,8 +10499,7 @@ qemuDomainGetInterfaceParameters(virDomainPtr dom, /* This functions assumes that job QEMU_JOB_QUERY is started by a caller */ static int -qemuDomainMemoryStatsInternal(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainMemoryStatsInternal(virDomainObjPtr vm, virDomainMemoryStatPtr stats, unsigned int nr_stats) @@ -10538,10 +10511,10 @@ qemuDomainMemoryStatsInternal(virQEMUDriverPtr driver, return -1; if (virDomainDefHasMemballoon(vm->def)) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetMemoryStats(qemuDomainGetMonitor(vm), vm->def->memballoon, stats, nr_stats); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if 
(qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0 || ret >= nr_stats) @@ -10568,7 +10541,6 @@ qemuDomainMemoryStats(virDomainPtr dom, unsigned int nr_stats, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -10580,12 +10552,12 @@ qemuDomainMemoryStats(virDomainPtr dom, if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; - ret = qemuDomainMemoryStatsInternal(driver, vm, stats, nr_stats); + ret = qemuDomainMemoryStatsInternal(vm, stats, nr_stats); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -10685,7 +10657,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10704,19 +10676,19 @@ qemuDomainMemoryPeek(virDomainPtr dom, qemuSecurityDomainSetPathLabel(driver, vm, tmp, false); priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (flags == VIR_MEMORY_VIRTUAL) { if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } } else { if (qemuMonitorSavePhysicalMemory(priv->mon, offset, size, tmp) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; /* Read the memory file into buffer. 
*/ @@ -10730,7 +10702,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FORCE_CLOSE(fd); @@ -10965,7 +10937,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(disk = virDomainDiskByName(vm->def, path, false))) { @@ -10994,7 +10966,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, goto endjob; } - if (qemuDomainBlocksStatsGather(driver, vm, path, true, &entry) < 0) + if (qemuDomainBlocksStatsGather(vm, path, true, &entry) < 0) goto endjob; if (!entry->wr_highest_offset_valid) { @@ -11037,7 +11009,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(entry); virDomainObjEndAPI(&vm); @@ -12518,8 +12490,7 @@ qemuConnectBaselineHypervisorCPU(virConnectPtr conn, static int -qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetJobInfoMigrationStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -12531,13 +12502,13 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver, jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { if (events && jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && - qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_NONE, jobInfo, NULL) < 0) return -1; if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION && - qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_NONE, jobInfo) < 0) return -1; @@ -12550,20 +12521,19 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver, static int 
-qemuDomainGetJobInfoDumpStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorDumpStats stats = { 0 }; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; rc = qemuMonitorQueryDump(priv->mon, &stats); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; jobInfo->stats.dump = stats; @@ -12601,8 +12571,7 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriverPtr driver, static int -qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetJobStatsInternal(virDomainObjPtr vm, bool completed, qemuDomainJobInfoPtr *jobInfo) { @@ -12626,7 +12595,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, return -1; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -12641,17 +12610,17 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, switch ((*jobInfo)->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobInfo) < 0) + if (qemuDomainGetJobInfoMigrationStats(vm, *jobInfo) < 0) goto cleanup; break; case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobInfo) < 0) + if (qemuDomainGetJobInfoDumpStats(vm, *jobInfo) < 0) goto cleanup; break; case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - if (qemuBackupGetJobInfoStats(driver, vm, *jobInfo) < 0) + if (qemuBackupGetJobInfoStats(vm, *jobInfo) < 0) goto cleanup; break; @@ -12662,7 +12631,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, ret = 0; cleanup: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ 
-12671,7 +12640,6 @@ static int qemuDomainGetJobInfo(virDomainPtr dom, virDomainJobInfoPtr info) { - virQEMUDriverPtr driver = dom->conn->privateData; g_autoptr(qemuDomainJobInfo) jobInfo = NULL; virDomainObjPtr vm; int ret = -1; @@ -12684,7 +12652,7 @@ qemuDomainGetJobInfo(virDomainPtr dom, if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobInfo) < 0) + if (qemuDomainGetJobStatsInternal(vm, false, &jobInfo) < 0) goto cleanup; if (!jobInfo || @@ -12708,7 +12676,6 @@ qemuDomainGetJobStats(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; qemuDomainJobPrivatePtr jobPriv; @@ -12727,7 +12694,7 @@ qemuDomainGetJobStats(virDomainPtr dom, priv = vm->privateData; jobPriv = priv->job.privateData; - if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) + if (qemuDomainGetJobStatsInternal(vm, completed, &jobInfo) < 0) goto cleanup; if (!jobInfo || @@ -12759,9 +12726,9 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) VIR_DEBUG("Cancelling migration job at client request"); qemuDomainObjAbortAsyncJob(vm); - qemuDomainObjEnterMonitor(priv->driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateCancel(priv->mon); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -12770,7 +12737,6 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) static int qemuDomainAbortJob(virDomainPtr dom) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -12783,7 +12749,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_ABORT) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) 
@@ -12852,7 +12818,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -12865,7 +12831,6 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, unsigned long long downtime, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; g_autoptr(qemuMigrationParams) migParams = NULL; @@ -12880,7 +12845,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -12899,20 +12864,20 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, downtime) < 0) goto endjob; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetMigrationDowntime(priv->mon, downtime); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -12925,7 +12890,6 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, unsigned long long *downtime, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuMigrationParamsPtr migParams = NULL; int ret = -1; @@ -12939,13 +12903,13 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) 
goto endjob; - if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, &migParams) < 0) goto endjob; @@ -12965,7 +12929,7 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: qemuMigrationParamsFree(migParams); @@ -12979,7 +12943,6 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, unsigned long long *cacheSize, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; g_autoptr(qemuMigrationParams) migParams = NULL; @@ -12994,7 +12957,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13010,7 +12973,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, } if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_XBZRLE_CACHE_SIZE)) { - if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, &migParams) < 0) goto endjob; @@ -13019,16 +12982,16 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, cacheSize) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorGetMigrationCacheSize(priv->mon, cacheSize); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -13040,7 +13003,6 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, unsigned long long cacheSize, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; 
qemuDomainObjPrivatePtr priv; g_autoptr(qemuMigrationParams) migParams = NULL; @@ -13055,7 +13017,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13080,20 +13042,20 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, cacheSize) < 0) goto endjob; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetMigrationCacheSize(priv->mon, cacheSize); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -13105,7 +13067,6 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, unsigned long bandwidth, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; bool postcopy = !!(flags & VIR_DOMAIN_MIGRATE_MAX_SPEED_POSTCOPY); @@ -13141,7 +13102,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13166,15 +13127,15 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, bandwidth * 1024 * 1024) < 0) goto endjob; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = 
qemuMonitorSetMigrationSpeed(priv->mon, bandwidth); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } @@ -13184,7 +13145,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -13193,8 +13154,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, static int -qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, unsigned long *bandwidth) { g_autoptr(qemuMigrationParams) migParams = NULL; @@ -13202,13 +13162,13 @@ qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriverPtr driver, int rc; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) goto cleanup; - if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, &migParams) < 0) goto cleanup; @@ -13239,7 +13199,7 @@ qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriverPtr driver, ret = 0; cleanup: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -13249,7 +13209,6 @@ qemuDomainMigrateGetMaxSpeed(virDomainPtr dom, unsigned long *bandwidth, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; bool postcopy = !!(flags & VIR_DOMAIN_MIGRATE_MAX_SPEED_POSTCOPY); @@ -13266,7 +13225,7 @@ qemuDomainMigrateGetMaxSpeed(virDomainPtr dom, goto cleanup; if (postcopy) { - if (qemuDomainMigrationGetPostcopyBandwidth(driver, vm, bandwidth) < 0) + if (qemuDomainMigrationGetPostcopyBandwidth(vm, bandwidth) < 0) goto cleanup; } else { *bandwidth = priv->migMaxBandwidth; @@ -13284,7 +13243,6 @@ static int qemuDomainMigrateStartPostCopy(virDomainPtr dom, unsigned int flags) { - 
virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; int ret = -1; @@ -13297,7 +13255,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13320,13 +13278,13 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, } VIR_DEBUG("Starting post-copy"); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateStartPostCopy(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14023,7 +13981,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, if (virDomainQemuMonitorCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14035,13 +13993,13 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14203,8 +14161,7 @@ qemuDomainOpenChannel(virDomainPtr dom, * abort with pivot; this updates the VM definition as appropriate, on * either success or failure. 
*/ static int -qemuDomainBlockPivot(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainBlockPivot(virDomainObjPtr vm, qemuBlockJobDataPtr job, virDomainDiskDefPtr disk) { @@ -14294,7 +14251,7 @@ qemuDomainBlockPivot(virQEMUDriverPtr driver, break; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (blockdev) { int rc = 0; @@ -14314,7 +14271,7 @@ qemuDomainBlockPivot(virQEMUDriverPtr driver, } else { ret = qemuMonitorDrivePivot(priv->mon, job->name); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; /* The pivot failed. The block job in QEMU remains in the synchronised state */ @@ -14339,7 +14296,6 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; const char *device = NULL; const char *jobname = NULL; virDomainDiskDefPtr disk; @@ -14361,7 +14317,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14439,7 +14395,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, device = job->name; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (!blockdev && baseSource) basePath = qemuMonitorDiskNameLookup(priv->mon, device, disk->src, baseSource); @@ -14448,7 +14404,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, (!baseSource || basePath)) ret = qemuMonitorBlockStream(priv->mon, device, jobname, persistjob, basePath, nodebase, backingPath, speed); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -14457,7 +14413,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, qemuBlockJobStarted(job, vm); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: qemuBlockJobStartupFinalize(vm, job); @@ 
-14491,7 +14447,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14521,15 +14477,15 @@ qemuDomainBlockJobAbort(virDomainPtr dom, qemuBlockJobSyncBegin(job); if (pivot) { - if ((ret = qemuDomainBlockPivot(driver, vm, job, disk)) < 0) + if ((ret = qemuDomainBlockPivot(vm, job, disk)) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (blockdev) ret = qemuMonitorJobCancel(priv->mon, job->name, false); else ret = qemuMonitorBlockJobCancel(priv->mon, job->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -1; goto endjob; } @@ -14574,7 +14530,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, endjob: if (job && !async) qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14636,7 +14592,6 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, virDomainBlockJobInfoPtr info, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; virDomainDiskDefPtr disk; int ret = -1; @@ -14652,7 +14607,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14666,9 +14621,9 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetBlockJobInfo(qemuDomainGetMonitor(vm), job->name, &rawInfo); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret <= 0) goto endjob; @@ -14680,7 +14635,7 @@ 
qemuDomainGetBlockJobInfo(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14694,7 +14649,6 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, unsigned long bandwidth, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainDiskDefPtr disk; int ret = -1; virDomainObjPtr vm; @@ -14720,7 +14674,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14735,15 +14689,15 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorBlockJobSetSpeed(qemuDomainGetMonitor(vm), job->name, speed); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14922,7 +14876,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -15130,9 +15084,9 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, } if (data) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuBlockStorageSourceChainAttach(priv->mon, data); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) @@ -15151,7 +15105,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE; /* Actually start the mirroring */ - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (blockdev) { ret = 
qemuMonitorBlockdevMirror(priv->mon, job->name, true, @@ -15167,7 +15121,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, } virDomainAuditDisk(vm, NULL, mirror, "mirror", ret >= 0); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) { qemuDomainStorageSourceChainAccessRevoke(driver, vm, mirror); @@ -15185,12 +15139,12 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (ret < 0 && virDomainObjIsActive(vm)) { if (data || crdata) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (data) qemuBlockStorageSourceChainDetach(priv->mon, data); if (crdata) qemuBlockStorageSourceAttachRollback(priv->mon, crdata->srcdata[0]); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } if (need_revoke) qemuDomainStorageSourceChainAccessRevoke(driver, vm, mirror); @@ -15198,7 +15152,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (need_unlink && virStorageFileUnlink(mirror) < 0) VIR_WARN("%s", _("unable to remove just-created copy target")); virStorageFileDeinit(mirror); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); qemuBlockJobStartupFinalize(vm, job); return ret; @@ -15422,7 +15376,7 @@ qemuDomainBlockCommit(virDomainPtr dom, if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -15599,7 +15553,7 @@ qemuDomainBlockCommit(virDomainPtr dom, device = job->name; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (!blockdev) { basePath = qemuMonitorDiskNameLookup(priv->mon, device, disk->src, @@ -15613,7 +15567,7 @@ qemuDomainBlockCommit(virDomainPtr dom, topPath, nodetop, basePath, nodebase, backingPath, speed); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || ret < 0) { + if (qemuDomainObjExitMonitor(vm) < 0 || 
ret < 0) { ret = -1; goto endjob; } @@ -15638,7 +15592,7 @@ qemuDomainBlockCommit(virDomainPtr dom, virErrorRestore(&orig_err); } qemuBlockJobStartupFinalize(vm, job); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -15665,7 +15619,7 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -15703,14 +15657,14 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, fd, "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH) != 0); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -15778,14 +15732,14 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom, if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); if (ret < 0) goto cleanup; @@ -16024,7 +15978,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, cfg = virQEMUDriverGetConfig(driver); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, 
QEMU_JOB_MODIFY) < 0) goto cleanup; priv = vm->privateData; @@ -16235,12 +16189,12 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, /* NB: Let's let QEMU decide how to handle issues with _length * via the JSON error code from the block_set_io_throttle call */ - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSetBlockIoThrottle(priv->mon, drivealias, qdevid, &info, supportMaxOptions, set_fields & QEMU_BLOCK_IOTUNE_SET_GROUP_NAME, supportMaxLengthOptions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) goto endjob; @@ -16290,7 +16244,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(info.group_name); @@ -16309,7 +16263,6 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, unsigned int flags) { virDomainDiskDefPtr disk; - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuDomainObjPrivatePtr priv = NULL; virDomainDefPtr def = NULL; @@ -16335,7 +16288,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; /* the API check guarantees that only one of the definitions will be set */ @@ -16377,9 +16330,9 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (!(drivealias = qemuAliasDiskDriveFromDisk(disk))) goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetBlockIoThrottle(priv->mon, drivealias, qdevid, &reply); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (ret < 0) goto endjob; @@ -16448,7 +16401,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: 
VIR_FREE(reply.group_name); @@ -16462,7 +16415,6 @@ qemuDomainGetDiskErrors(virDomainPtr dom, unsigned int nerrors, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuDomainObjPrivatePtr priv; virHashTablePtr table = NULL; @@ -16482,7 +16434,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16493,9 +16445,9 @@ qemuDomainGetDiskErrors(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); table = qemuMonitorGetBlockInfo(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (!table) goto endjob; @@ -16523,7 +16475,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, ret = n; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -16559,7 +16511,7 @@ qemuDomainSetMetadata(virDomainPtr dom, if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; ret = virDomainObjSetMetadata(vm, type, metadata, key, uri, @@ -16572,7 +16524,7 @@ qemuDomainSetMetadata(virDomainPtr dom, virObjectEventStateQueue(driver->domainEventState, ev); } - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -16652,17 +16604,16 @@ qemuDomainGetCPUStats(virDomainPtr domain, static int -qemuDomainProbeQMPCurrentMachine(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainProbeQMPCurrentMachine(virDomainObjPtr vm, bool *wakeupSupported) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorCurrentMachineInfo info = { 0 }; int rv; - 
qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rv = qemuMonitorGetCurrentMachineInfo(priv->mon, &info); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; @@ -16673,8 +16624,7 @@ qemuDomainProbeQMPCurrentMachine(virQEMUDriverPtr driver, /* returns -1 on error, or if query is not supported, 0 if query was successful */ static int -qemuDomainQueryWakeupSuspendSupport(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, bool *wakeupSupported) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -16683,29 +16633,28 @@ qemuDomainQueryWakeupSuspendSupport(virQEMUDriverPtr driver, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE)) return -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) goto endjob; - ret = qemuDomainProbeQMPCurrentMachine(driver, vm, wakeupSupported); + ret = qemuDomainProbeQMPCurrentMachine(vm, wakeupSupported); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } static int -qemuDomainPMSuspendAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainPMSuspendAgent(virDomainObjPtr vm, unsigned int target) { qemuAgentPtr agent; int ret = -1; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -16730,7 +16679,6 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom, unsigned long long duration, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; bool wakeupSupported; @@ -16765,7 +16713,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom, * that don't know about this cap, will keep their old behavior of * suspending 'in the dark'. 
*/ - if (qemuDomainQueryWakeupSuspendSupport(driver, vm, &wakeupSupported) == 0) { + if (qemuDomainQueryWakeupSuspendSupport(vm, &wakeupSupported) == 0) { if (!wakeupSupported) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("Domain does not have suspend support")); @@ -16790,7 +16738,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom, } } - ret = qemuDomainPMSuspendAgent(driver, vm, target); + ret = qemuDomainPMSuspendAgent(vm, target); cleanup: virDomainObjEndAPI(&vm); @@ -16801,7 +16749,6 @@ static int qemuDomainPMWakeup(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -16814,7 +16761,7 @@ qemuDomainPMWakeup(virDomainPtr dom, if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16822,13 +16769,13 @@ qemuDomainPMWakeup(virDomainPtr dom, priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemWakeup(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -16871,7 +16818,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16946,7 +16893,6 @@ qemuDomainFSTrim(virDomainPtr dom, unsigned long long minimum, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuAgentPtr agent; int ret = -1; @@ -16966,7 +16912,7 @@ 
qemuDomainFSTrim(virDomainPtr dom, if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -17130,14 +17076,13 @@ qemuConnectGetCPUModelNames(virConnectPtr conn, static int -qemuDomainGetHostnameAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetHostnameAgent(virDomainObjPtr vm, char **hostname) { qemuAgentPtr agent; int ret = -1; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17158,8 +17103,7 @@ qemuDomainGetHostnameAgent(virQEMUDriverPtr driver, static int -qemuDomainGetHostnameLease(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetHostnameLease(virDomainObjPtr vm, char **hostname) { char macaddr[VIR_MAC_STRING_BUFLEN]; @@ -17169,7 +17113,7 @@ qemuDomainGetHostnameLease(virQEMUDriverPtr driver, size_t i, j; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17211,7 +17155,7 @@ qemuDomainGetHostnameLease(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -17220,7 +17164,6 @@ static char * qemuDomainGetHostname(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; char *hostname = NULL; @@ -17241,10 +17184,10 @@ qemuDomainGetHostname(virDomainPtr dom, goto cleanup; if (flags & VIR_DOMAIN_GET_HOSTNAME_AGENT) { - if (qemuDomainGetHostnameAgent(driver, vm, &hostname) < 0) + if (qemuDomainGetHostnameAgent(vm, &hostname) < 0) goto cleanup; } else if (flags & VIR_DOMAIN_GET_HOSTNAME_LEASE) { - if (qemuDomainGetHostnameLease(driver, vm, 
&hostname) < 0) + if (qemuDomainGetHostnameLease(vm, &hostname) < 0) goto cleanup; } @@ -17267,7 +17210,6 @@ qemuDomainGetTime(virDomainPtr dom, unsigned int *nseconds, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; int ret = -1; @@ -17281,7 +17223,7 @@ qemuDomainGetTime(virDomainPtr dom, if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17309,8 +17251,7 @@ qemuDomainGetTime(virDomainPtr dom, static int -qemuDomainSetTimeAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainSetTimeAgent(virDomainObjPtr vm, long long seconds, unsigned int nseconds, bool rtcSync) @@ -17318,7 +17259,7 @@ qemuDomainSetTimeAgent(virQEMUDriverPtr driver, qemuAgentPtr agent; int ret = -1; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17343,7 +17284,6 @@ qemuDomainSetTime(virDomainPtr dom, unsigned int nseconds, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; bool rtcSync = flags & VIR_DOMAIN_TIME_SYNC; @@ -17372,10 +17312,10 @@ qemuDomainSetTime(virDomainPtr dom, goto cleanup; } - if (qemuDomainSetTimeAgent(driver, vm, seconds, nseconds, rtcSync) < 0) + if (qemuDomainSetTimeAgent(vm, seconds, nseconds, rtcSync) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17383,9 +17323,9 @@ qemuDomainSetTime(virDomainPtr dom, /* Don't try to call rtc-reset-reinjection if it's not available */ if (virQEMUCapsGet(priv->qemuCaps, 
QEMU_CAPS_RTC_RESET_REINJECTION)) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rv = qemuMonitorRTCResetReinjection(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rv < 0) @@ -17395,7 +17335,7 @@ qemuDomainSetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -17409,7 +17349,6 @@ qemuDomainFSFreeze(virDomainPtr dom, unsigned int nmountpoints, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -17421,7 +17360,7 @@ qemuDomainFSFreeze(virDomainPtr dom, if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17444,7 +17383,6 @@ qemuDomainFSThaw(virDomainPtr dom, unsigned int nmountpoints, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -17462,7 +17400,7 @@ qemuDomainFSThaw(virDomainPtr dom, if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17891,7 +17829,7 @@ qemuDomainGetStatsMemory(virQEMUDriverPtr driver, static int -qemuDomainGetStatsBalloon(virQEMUDriverPtr driver, +qemuDomainGetStatsBalloon(virQEMUDriverPtr driver G_GNUC_UNUSED, virDomainObjPtr dom, virTypedParamListPtr params, unsigned int privflags) @@ -17917,7 +17855,7 @@ qemuDomainGetStatsBalloon(virQEMUDriverPtr driver, if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom)) return 0; - nr_stats = qemuDomainMemoryStatsInternal(driver, dom, stats, + nr_stats = qemuDomainMemoryStatsInternal(dom, 
stats, VIR_DOMAIN_MEMORY_STAT_NR); if (nr_stats < 0) return 0; @@ -17949,7 +17887,7 @@ qemuDomainGetStatsBalloon(virQEMUDriverPtr driver, static int -qemuDomainGetStatsVcpu(virQEMUDriverPtr driver, +qemuDomainGetStatsVcpu(virQEMUDriverPtr driver G_GNUC_UNUSED, virDomainObjPtr dom, virTypedParamListPtr params, unsigned int privflags) @@ -17974,7 +17912,7 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver, goto cleanup; if (HAVE_JOB(privflags) && virDomainObjIsActive(dom) && - qemuDomainRefreshVcpuHalted(driver, dom, QEMU_ASYNC_JOB_NONE) < 0) { + qemuDomainRefreshVcpuHalted(dom, QEMU_ASYNC_JOB_NONE) < 0) { /* it's ok to be silent and go ahead, because halted vcpu info * wasn't here from the beginning */ virResetLastError(); @@ -18395,7 +18333,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver, bool visitBacking = !!(privflags & QEMU_DOMAIN_STATS_BACKING); if (HAVE_JOB(privflags) && virDomainObjIsActive(dom)) { - qemuDomainObjEnterMonitor(driver, dom); + qemuDomainObjEnterMonitor(dom); rc = qemuMonitorGetAllBlockStatsInfo(priv->mon, &stats, visitBacking); @@ -18410,7 +18348,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver, if (fetchnodedata) nodedata = qemuMonitorQueryNamedBlockNodes(priv->mon); - if (qemuDomainObjExitMonitor(driver, dom) < 0) + if (qemuDomainObjExitMonitor(dom) < 0) goto cleanup; /* failure to retrieve stats is fine at this point */ @@ -18449,7 +18387,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver, static int -qemuDomainGetStatsIOThread(virQEMUDriverPtr driver, +qemuDomainGetStatsIOThread(virQEMUDriverPtr driver G_GNUC_UNUSED, virDomainObjPtr dom, virTypedParamListPtr params, unsigned int privflags) @@ -18466,7 +18404,7 @@ qemuDomainGetStatsIOThread(virQEMUDriverPtr driver, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) return 0; - if ((niothreads = qemuDomainGetIOThreadsMon(driver, dom, &iothreads)) < 0) + if ((niothreads = qemuDomainGetIOThreadsMon(dom, &iothreads)) < 0) return -1; /* qemuDomainGetIOThreadsMon returns a 
NULL-terminated list, so we must free @@ -18713,9 +18651,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, int rv; if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT) - rv = qemuDomainObjBeginJobNowait(driver, vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJobNowait(vm, QEMU_JOB_QUERY); else - rv = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY); if (rv == 0) domflags |= QEMU_DOMAIN_STATS_HAVE_JOB; @@ -18726,7 +18664,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, domflags |= QEMU_DOMAIN_STATS_BACKING; if (qemuDomainGetStats(conn, vm, stats, &tmp, domflags) < 0) { if (HAVE_JOB(domflags) && vm) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); virObjectUnlock(vm); goto cleanup; @@ -18736,7 +18674,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, tmpstats[nstats++] = tmp; if (HAVE_JOB(domflags)) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); virObjectUnlock(vm); } @@ -18777,15 +18715,13 @@ qemuNodeAllocPages(virConnectPtr conn, } static int -qemuDomainGetFSInfoAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetFSInfoAgent(virDomainObjPtr vm, qemuAgentFSInfoPtr **info) { int ret = -1; qemuAgentPtr agent; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) return ret; if (virDomainObjCheckActive(vm) < 0) @@ -18879,7 +18815,6 @@ qemuDomainGetFSInfo(virDomainPtr dom, virDomainFSInfoPtr **info, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuAgentFSInfoPtr *agentinfo = NULL; int ret = -1; @@ -18893,10 +18828,10 @@ qemuDomainGetFSInfo(virDomainPtr dom, if (virDomainGetFSInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if ((nfs = qemuDomainGetFSInfoAgent(driver, vm, &agentinfo)) < 0) + if ((nfs = qemuDomainGetFSInfoAgent(vm, &agentinfo)) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if 
(qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -18905,7 +18840,7 @@ qemuDomainGetFSInfo(virDomainPtr dom, ret = virDomainFSInfoFormat(agentinfo, nfs, vm->def, info); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: g_free(agentinfo); @@ -18920,7 +18855,6 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, unsigned int source, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; int ret = -1; @@ -18942,7 +18876,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, break; case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT: - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -18980,7 +18914,6 @@ qemuDomainSetUserPassword(virDomainPtr dom, const char *password, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuAgentPtr agent; int ret = -1; @@ -18994,7 +18927,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19146,7 +19079,7 @@ static int qemuDomainRename(virDomainPtr dom, if (virDomainRenameEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjIsActive(vm)) { @@ -19193,7 +19126,7 @@ static int qemuDomainRename(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -19269,7 +19202,6 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, unsigned int *nparams, unsigned int 
flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; qemuAgentCPUInfoPtr info = NULL; @@ -19284,7 +19216,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19318,7 +19250,6 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, int state, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; virBitmapPtr map = NULL; qemuAgentCPUInfoPtr info = NULL; @@ -19343,7 +19274,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19436,7 +19367,7 @@ qemuDomainSetVcpu(virDomainPtr dom, if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -19463,7 +19394,7 @@ qemuDomainSetVcpu(virDomainPtr dom, ret = qemuDomainSetVcpuInternal(driver, vm, def, persistentDef, map, !!state); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virBitmapFree(map); @@ -19478,7 +19409,6 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, unsigned long long threshold, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; qemuDomainObjPrivatePtr priv; virDomainObjPtr vm = NULL; virStorageSourcePtr src; @@ -19496,7 +19426,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0) 
goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19513,7 +19443,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && !src->nodestorage && - qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockNodeNamesDetect(vm, QEMU_ASYNC_JOB_NONE) < 0) goto endjob; if (!src->nodestorage) { @@ -19525,15 +19455,15 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, nodename = g_strdup(src->nodestorage); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetBlockThreshold(priv->mon, nodename, threshold); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -19591,7 +19521,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -19622,7 +19552,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -19703,8 +19633,7 @@ qemuNodeGetSEVInfo(virConnectPtr conn, static int -qemuDomainGetSEVMeasurement(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetSEVMeasurement(virDomainObjPtr vm, virTypedParameterPtr *params, int *nparams, unsigned int flags) @@ -19715,13 +19644,13 @@ qemuDomainGetSEVMeasurement(virQEMUDriverPtr driver, virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, 
QEMU_JOB_QUERY) < 0) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); tmp = qemuMonitorGetSEVMeasurement(QEMU_DOMAIN_PRIVATE(vm)->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (!tmp) @@ -19735,7 +19664,7 @@ qemuDomainGetSEVMeasurement(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -19746,7 +19675,6 @@ qemuDomainGetLaunchSecurityInfo(virDomainPtr domain, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = domain->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -19757,7 +19685,7 @@ qemuDomainGetLaunchSecurityInfo(virDomainPtr domain, goto cleanup; if (vm->def->sev) { - if (qemuDomainGetSEVMeasurement(driver, vm, params, nparams, flags) < 0) + if (qemuDomainGetSEVMeasurement(vm, params, nparams, flags) < 0) goto cleanup; } @@ -19893,7 +19821,6 @@ qemuDomainGetGuestInfo(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; int ret = -1; @@ -19917,8 +19844,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, if (virDomainGetGuestInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19969,7 +19895,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuDomainObjEndAgentJob(vm); if (nfs > 0) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19980,7 +19906,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuAgentFSInfoFormatParams(agentfsinfo, nfs, vm->def, params, nparams, &maxparams); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } cleanup: diff --git 
a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c index e2c6e14c2e..7b626ee383 100644 --- a/src/qemu/qemu_hotplug.c +++ b/src/qemu/qemu_hotplug.c @@ -99,14 +99,13 @@ qemuDomainDeleteDevice(virDomainObjPtr vm, const char *alias) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDelDevice(priv->mon, alias); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { /* Domain is no longer running. No cleanup needed. */ return -1; } @@ -231,7 +230,6 @@ qemuHotplugWaitForTrayEject(virDomainObjPtr vm, /** * qemuDomainChangeMediaLegacy: - * @driver: qemu driver structure * @vm: domain definition * @disk: disk definition to change the source of * @newsrc: new disk source to change to @@ -245,8 +243,7 @@ qemuHotplugWaitForTrayEject(virDomainObjPtr vm, * Returns 0 on success, -1 on error and reports libvirt error */ static int -qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeMediaLegacy(virDomainObjPtr vm, virDomainDiskDefPtr disk, virStorageSourcePtr newsrc, bool force) @@ -267,9 +264,9 @@ qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, if (!(driveAlias = qemuAliasDiskDriveFromDisk(disk))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorEjectMedia(priv->mon, driveAlias, force); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; /* If the tray is present wait for it to open. 
*/ @@ -279,9 +276,9 @@ qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, return -1; /* re-issue ejection command to pop out the media */ - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorEjectMedia(priv->mon, driveAlias, false); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; } else { @@ -297,12 +294,12 @@ qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, if (virStorageSourceGetActualType(newsrc) != VIR_STORAGE_TYPE_DIR) format = virStorageFileFormatTypeToString(newsrc->format); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorChangeMedia(priv->mon, driveAlias, sourcestr, format); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } @@ -343,7 +340,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, if (!(props = qemuBuildDBusVMStateInfoProps(driver, vm))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorAddObject(priv->mon, &props, NULL); @@ -351,7 +348,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, if (ret == 0) priv->dbusVMState = true; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; @@ -370,8 +367,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, * Returns: 0 on success, -1 on error. 
*/ int -qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -380,7 +376,7 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, if (!priv->dbusVMState) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorDelObject(priv->mon, qemuDomainGetDBusVMStateAlias(), true); @@ -388,7 +384,7 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, if (ret == 0) priv->dbusVMState = false; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; @@ -397,7 +393,6 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, /** * qemuHotplugAttachManagedPR: - * @driver: QEMU driver object * @vm: domain object * @src: new disk source to be attached to @vm * @asyncJob: asynchronous job identifier @@ -408,8 +403,7 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, * Returns: 0 on success, -1 on error. 
*/ static int -qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuHotplugAttachManagedPR(virDomainObjPtr vm, virStorageSourcePtr src, qemuDomainAsyncJob asyncJob) { @@ -431,12 +425,12 @@ qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, daemonStarted = true; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorAddObject(priv->mon, &props, NULL); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; ret = 0; @@ -451,7 +445,6 @@ qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, /** * qemuHotplugRemoveManagedPR: - * @driver: QEMU driver object * @vm: domain object * @asyncJob: asynchronous job identifier * @@ -459,8 +452,7 @@ qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, * it any more. */ static int -qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuHotplugRemoveManagedPR(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -472,11 +464,11 @@ qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ignore_value(qemuMonitorDelObject(priv->mon, qemuDomainGetManagedPRAlias(), false)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; qemuProcessKillManagedPRDaemon(vm); @@ -490,7 +482,6 @@ qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, /** * qemuDomainChangeMediaBlockdev: - * @driver: qemu driver structure * @vm: domain definition * @disk: disk definition to change the source of * @oldsrc: old source definition @@ -505,8 +496,7 @@ qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, * Returns 0 on success, -1 on error and reports libvirt error */ static int 
-qemuDomainChangeMediaBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeMediaBlockdev(virDomainObjPtr vm, virDomainDiskDefPtr disk, virStorageSourcePtr oldsrc, virStorageSourcePtr newsrc, @@ -533,16 +523,16 @@ qemuDomainChangeMediaBlockdev(virQEMUDriverPtr driver, } if (diskPriv->tray && disk->tray_status != VIR_DOMAIN_DISK_TRAY_OPEN) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorBlockdevTrayOpen(priv->mon, diskPriv->qomName, force); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; if (!force && qemuHotplugWaitForTrayEject(vm, disk) < 0) return -1; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorBlockdevMediumRemove(priv->mon, diskPriv->qomName); @@ -564,7 +554,7 @@ qemuDomainChangeMediaBlockdev(virQEMUDriverPtr driver, if (rc < 0 && newbackend) qemuBlockStorageSourceChainDetach(priv->mon, newbackend); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; return 0; @@ -628,13 +618,13 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0) goto cleanup; - if (qemuHotplugAttachManagedPR(driver, vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachManagedPR(vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) - rc = qemuDomainChangeMediaBlockdev(driver, vm, disk, oldsrc, newsrc, force); + rc = qemuDomainChangeMediaBlockdev(vm, disk, oldsrc, newsrc, force); else - rc = qemuDomainChangeMediaLegacy(driver, vm, disk, newsrc, force); + rc = qemuDomainChangeMediaLegacy(vm, disk, newsrc, force); virDomainAuditDisk(vm, oldsrc, newsrc, "update", rc >= 0); @@ -664,7 +654,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, /* remove PR manager object if unneeded */ if (managedpr) - 
ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE)); /* revert old image do the disk definition */ if (oldsrc) @@ -726,10 +716,10 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->disks, vm->def->ndisks + 1) < 0) goto cleanup; - if (qemuHotplugAttachManagedPR(driver, vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachManagedPR(vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuBlockStorageSourceChainAttach(priv->mon, data) < 0) goto exit_monitor; @@ -764,7 +754,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, VIR_WARN("failed to set blkdeviotune for '%s' of '%s'", disk->dst, vm->def->name); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -2; goto cleanup; } @@ -785,11 +775,11 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, ignore_value(qemuMonitorBlockdevDel(priv->mon, corAlias)); qemuBlockStorageSourceChainDetach(priv->mon, data); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -2; if (virStorageSourceChainHasManagedPR(disk->src) && - qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0) ret = -2; virDomainAuditDisk(vm, NULL, disk->src, "attach", false); @@ -820,8 +810,7 @@ qemuDomainAttachVirtioDiskDevice(virQEMUDriverPtr driver, } -int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachControllerDevice(virDomainObjPtr vm, virDomainControllerDefPtr controller) { int ret = -1; @@ -869,7 +858,7 @@ int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->controllers, vm->def->ncontrollers+1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if 
((ret = qemuDomainAttachExtensionDevice(priv->mon, &controller->info)) < 0) { @@ -880,7 +869,7 @@ int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, ignore_value(qemuDomainDetachExtensionDevice(priv->mon, &controller->info)); exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; ret = -1; goto cleanup; @@ -897,8 +886,7 @@ int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, } static virDomainControllerDefPtr -qemuDomainFindOrCreateSCSIDiskController(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainFindOrCreateSCSIDiskController(virDomainObjPtr vm, int controller) { size_t i; @@ -939,7 +927,7 @@ qemuDomainFindOrCreateSCSIDiskController(virQEMUDriverPtr driver, VIR_INFO("No SCSI controller present, hotplugging one model=%s", virDomainControllerModelSCSITypeToString(cont->model)); - if (qemuDomainAttachControllerDevice(driver, vm, cont) < 0) { + if (qemuDomainAttachControllerDevice(vm, cont) < 0) { VIR_FREE(cont); return NULL; } @@ -985,7 +973,7 @@ qemuDomainAttachSCSIDisk(virQEMUDriverPtr driver, * exist; there must not be any missing index in between. 
*/ for (i = 0; i <= disk->info.addr.drive.controller; i++) { - if (!qemuDomainFindOrCreateSCSIDiskController(driver, vm, i)) + if (!qemuDomainFindOrCreateSCSIDiskController(vm, i)) return -1; } @@ -1391,11 +1379,11 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, slirpfdName))) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (actualType == VIR_DOMAIN_NET_TYPE_VHOSTUSER) { if (qemuMonitorAttachCharDev(priv->mon, charDevAlias, net->data.vhostuser) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto cleanup; } @@ -1406,13 +1394,13 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, tapfd, tapfdName, tapfdSize, vhostfd, vhostfdName, vhostfdSize, slirpfd, slirpfdName) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } netdevPlugged = true; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; for (i = 0; i < tapfdSize; i++) @@ -1424,21 +1412,21 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, queueSize, priv->qemuCaps))) goto try_remove; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuDomainAttachExtensionDevice(priv->mon, &net->info) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } if (qemuMonitorAddDevice(priv->mon, nicstr) < 0) { ignore_value(qemuDomainDetachExtensionDevice(priv->mon, &net->info)); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; /* set 
link state */ @@ -1447,15 +1435,15 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("device alias not found: cannot set link state to down")); } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorSetLink(priv->mon, net->info.alias, VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; } /* link set to down */ @@ -1528,7 +1516,7 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, netdev_name = g_strdup_printf("host%s", net->info.alias); if (QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp) qemuSlirpStop(QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp, vm, driver, net); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (charDevPlugged && qemuMonitorDetachCharDev(priv->mon, charDevAlias) < 0) VIR_WARN("Failed to remove associated chardev %s", charDevAlias); @@ -1536,7 +1524,7 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, qemuMonitorRemoveNetdev(priv->mon, netdev_name) < 0) VIR_WARN("Failed to remove network backend for netdev %s", netdev_name); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&originalError); goto cleanup; } @@ -1634,7 +1622,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, if (!(devstr = qemuBuildPCIHostdevDevStr(vm->def, hostdev, 0, priv->qemuCaps))) goto error; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if ((ret = qemuDomainAttachExtensionDevice(priv->mon, hostdev->info)) < 0) goto exit_monitor; @@ -1643,7 +1631,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, ignore_value(qemuDomainDetachExtensionDevice(priv->mon, hostdev->info)); exit_monitor: - if 
(qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto error; virDomainAuditHostdev(vm, hostdev, "attach", ret == 0); @@ -1676,8 +1664,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, void -qemuDomainDelTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainDelTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, const char *secAlias, const char *tlsAlias) @@ -1690,7 +1677,7 @@ qemuDomainDelTLSObjects(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; if (tlsAlias) @@ -1699,7 +1686,7 @@ qemuDomainDelTLSObjects(virQEMUDriverPtr driver, if (secAlias) ignore_value(qemuMonitorDelObject(priv->mon, secAlias, false)); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); cleanup: virErrorRestore(&orig_err); @@ -1707,8 +1694,7 @@ qemuDomainDelTLSObjects(virQEMUDriverPtr driver, int -qemuDomainAddTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainAddTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virJSONValuePtr *secProps, virJSONValuePtr *tlsProps) @@ -1720,7 +1706,7 @@ qemuDomainAddTLSObjects(virQEMUDriverPtr driver, if (!tlsProps && !secProps) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (secProps && *secProps && @@ -1731,13 +1717,13 @@ qemuDomainAddTLSObjects(virQEMUDriverPtr driver, qemuMonitorAddObject(priv->mon, tlsProps, NULL) < 0) goto error; - return qemuDomainObjExitMonitor(driver, vm); + return qemuDomainObjExitMonitor(vm); error: virErrorPreserveLast(&orig_err); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, NULL); + qemuDomainDelTLSObjects(vm, 
asyncJob, secAlias, NULL); return -1; } @@ -1816,7 +1802,7 @@ qemuDomainAddChardevTLSObjects(virQEMUDriverPtr driver, goto cleanup; dev->data.tcp.tlscreds = true; - if (qemuDomainAddTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuDomainAddTLSObjects(vm, QEMU_ASYNC_JOB_NONE, &secProps, &tlsProps) < 0) goto cleanup; @@ -1857,13 +1843,13 @@ qemuDomainDelChardevTLSObjects(virQEMUDriverPtr driver, !(secAlias = qemuAliasForSecret(inAlias, NULL))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorDelObject(priv->mon, tlsAlias, false)); if (secAlias) ignore_value(qemuMonitorDelObject(priv->mon, secAlias, false)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return 0; @@ -1906,7 +1892,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, &tlsAlias, &secAlias) < 0) goto audit; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorAttachCharDev(priv->mon, charAlias, @@ -1917,7 +1903,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, if (qemuMonitorAddDevice(priv->mon, devstr) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto audit; def->redirdevs[def->nredirdevs++] = redirdev; @@ -1934,9 +1920,9 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, /* detach associated chardev on error */ if (chardevAdded) ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2169,7 +2155,7 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, &tlsAlias, &secAlias) < 0) goto audit; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if 
(qemuMonitorAttachCharDev(priv->mon, charAlias, chr->source) < 0) goto exit_monitor; @@ -2186,7 +2172,7 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto audit; qemuDomainChrInsertPreAlloced(vmdef, chr); @@ -2213,10 +2199,10 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, /* detach associated chardev on error */ if (chardevAttached) qemuMonitorDetachCharDev(priv->mon, charAlias); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2278,7 +2264,7 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, goto audit; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (rng->backend == VIR_DOMAIN_RNG_BACKEND_EGD && qemuMonitorAttachCharDev(priv->mon, charAlias, @@ -2297,7 +2283,7 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -2327,11 +2313,11 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, ignore_value(qemuMonitorDelObject(priv->mon, objAlias, false)); if (rng->backend == VIR_DOMAIN_RNG_BACKEND_EGD && chardevAdded) ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) releaseaddr = false; virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2411,7 +2397,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, if (qemuDomainAdjustMaxMemLock(vm, false) < 0) goto removedef; - qemuDomainObjEnterMonitor(driver, vm); + 
qemuDomainObjEnterMonitor(vm); if (qemuMonitorAddObject(priv->mon, &props, NULL) < 0) goto exit_monitor; objAdded = true; @@ -2419,7 +2405,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, if (qemuMonitorAddDevice(priv->mon, devstr) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { /* we shouldn't touch mem now, as the def might be freed */ mem = NULL; goto audit; @@ -2429,14 +2415,13 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, virObjectEventStateQueue(driver->domainEventState, event); /* fix the balloon size */ - ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE)); /* mem is consumed by vm->def */ mem = NULL; /* this step is best effort, removing the device would be so much trouble */ - ignore_value(qemuDomainUpdateMemoryDeviceInfo(driver, vm, - QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuDomainUpdateMemoryDeviceInfo(vm, QEMU_ASYNC_JOB_NONE)); ret = 0; @@ -2461,7 +2446,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); if (objAdded) ignore_value(qemuMonitorDelObject(priv->mon, objalias, false)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) mem = NULL; if (objAdded && mem) @@ -2527,9 +2512,9 @@ qemuDomainAttachHostUSBDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs+1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorAddDevice(priv->mon, devstr); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -1; goto cleanup; } @@ -2582,7 +2567,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, * exist; there must not be any missing index in between. 
*/ for (i = 0; i <= hostdev->info->addr.drive.controller; i++) { - if (!qemuDomainFindOrCreateSCSIDiskController(driver, vm, i)) + if (!qemuDomainFindOrCreateSCSIDiskController(vm, i)) return -1; } @@ -2617,7 +2602,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs + 1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuBlockStorageSourceAttachApply(priv->mon, data) < 0) goto exit_monitor; @@ -2625,7 +2610,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, if (qemuMonitorAddDevice(priv->mon, devstr) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; virDomainAuditHostdev(vm, hostdev, "attach", true); @@ -2652,7 +2637,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, exit_monitor: virErrorPreserveLast(&orig_err); qemuBlockStorageSourceAttachRollback(priv->mon, data); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); virDomainAuditHostdev(vm, hostdev, "attach", false); @@ -2729,7 +2714,7 @@ qemuDomainAttachSCSIVHostDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs + 1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if ((ret = qemuDomainAttachExtensionDevice(priv->mon, hostdev->info)) < 0) goto exit_monitor; @@ -2741,7 +2726,7 @@ qemuDomainAttachSCSIVHostDevice(virQEMUDriverPtr driver, } exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0 || ret < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || ret < 0) goto audit; vm->def->hostdevs[vm->def->nhostdevs++] = hostdev; @@ -2836,9 +2821,9 @@ qemuDomainAttachMediatedDevice(virQEMUDriverPtr driver, goto cleanup; teardownmemlock = true; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorAddDevice(priv->mon, 
devstr); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -1; goto cleanup; } @@ -2977,7 +2962,7 @@ qemuDomainAttachShmemDevice(virQEMUDriverPtr driver, } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (shmem->server.enabled) { if (qemuMonitorAttachCharDev(priv->mon, charAlias, @@ -2998,7 +2983,7 @@ qemuDomainAttachShmemDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { release_address = false; goto cleanup; } @@ -3030,7 +3015,7 @@ qemuDomainAttachShmemDevice(virQEMUDriverPtr driver, ignore_value(qemuMonitorDelObject(priv->mon, memAlias, false)); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) release_address = false; virErrorRestore(&orig_err); @@ -3084,14 +3069,14 @@ qemuDomainAttachWatchdog(virQEMUDriverPtr driver, actionStr = virDomainWatchdogActionTypeToString(actualAction); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rv = qemuMonitorSetWatchdogAction(priv->mon, actionStr); if (rv >= 0) rv = qemuMonitorAddDevice(priv->mon, watchdogstr); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseAddress = false; goto cleanup; } @@ -3111,8 +3096,7 @@ qemuDomainAttachWatchdog(virQEMUDriverPtr driver, int -qemuDomainAttachInputDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainAttachInputDevice(virDomainObjPtr vm, virDomainInputDefPtr input) { int ret = -1; @@ -3164,7 +3148,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->inputs, vm->def->ninputs + 1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuDomainAttachExtensionDevice(priv->mon, &input->info) < 0) goto exit_monitor; @@ -3174,7 +3158,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if 
(qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -3203,7 +3187,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, return ret; exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -3212,8 +3196,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, int -qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainAttachVsockDevice(virDomainObjPtr vm, virDomainVsockDefPtr vsock) { qemuDomainVsockPrivatePtr vsockPriv = (qemuDomainVsockPrivatePtr)vsock->privateData; @@ -3247,7 +3230,7 @@ qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, if (!(devstr = qemuBuildVsockDevStr(vm->def, vsock, priv->qemuCaps, fdprefix))) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuDomainAttachExtensionDevice(priv->mon, &vsock->info) < 0) goto exit_monitor; @@ -3257,7 +3240,7 @@ qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -3277,7 +3260,7 @@ qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, return ret; exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) releaseaddr = false; goto cleanup; } @@ -3420,8 +3403,7 @@ qemuDomainChangeNetFilter(virDomainObjPtr vm, return 0; } -int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeNetLinkState(virDomainObjPtr vm, virDomainNetDefPtr dev, int linkstate) { @@ -3436,7 +3418,7 @@ int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, VIR_DEBUG("dev: %s, state: %d", dev->info.alias, linkstate); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSetLink(priv->mon, dev->info.alias, linkstate); if (ret < 
0) @@ -3446,15 +3428,14 @@ int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, dev->linkstate = linkstate; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; } int -qemuDomainChangeNet(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeNet(virDomainObjPtr vm, virDomainDeviceDefPtr dev) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3878,7 +3859,7 @@ qemuDomainChangeNet(virQEMUDriverPtr driver, } if (needLinkStateChange && - qemuDomainChangeNetLinkState(driver, vm, olddev, newdev->linkstate) < 0) { + qemuDomainChangeNetLinkState(vm, olddev, newdev->linkstate) < 0) { goto cleanup; } @@ -3969,8 +3950,7 @@ qemuDomainFindGraphicsIndex(virDomainDefPtr def, int -qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeGraphicsPasswords(virDomainObjPtr vm, int type, virDomainGraphicsAuthDefPtr auth, const char *defaultPasswd, @@ -3992,7 +3972,7 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, if (auth->connected) connected = virDomainGraphicsAuthConnectedTypeToString(auth->connected); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return ret; ret = qemuMonitorSetPassword(priv->mon, type, password, connected); @@ -4012,7 +3992,7 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, ret = qemuMonitorExpirePassword(priv->mon, type, expire); end_job: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -4116,8 +4096,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver, dev->data.vnc.auth.passwd)) { VIR_DEBUG("Updating password on VNC server %p %p", dev->data.vnc.auth.passwd, cfg->vncPassword); - if (qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_VNC, + if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC, &dev->data.vnc.auth, cfg->vncPassword, 
QEMU_ASYNC_JOB_NONE) < 0) @@ -4164,8 +4143,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver, dev->data.spice.auth.passwd)) { VIR_DEBUG("Updating password on SPICE server %p %p", dev->data.spice.auth.passwd, cfg->spicePassword); - if (qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_SPICE, + if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE, &dev->data.spice.auth, cfg->spicePassword, QEMU_ASYNC_JOB_NONE) < 0) @@ -4279,7 +4257,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, } } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (corAlias) ignore_value(qemuMonitorBlockdevDel(priv->mon, corAlias)); @@ -4287,7 +4265,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, if (diskBackend) qemuBlockStorageSourceChainDetach(priv->mon, diskBackend); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; virDomainAuditDisk(vm, disk->src, NULL, "detach", true); @@ -4303,7 +4281,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, ignore_value(qemuRemoveSharedDevice(driver, &dev, vm->def->name)); if (virStorageSourceChainHasManagedPR(disk->src) && - qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; ret = 0; @@ -4353,9 +4331,9 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver, backendAlias = g_strdup_printf("mem%s", mem->info.alias); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDelObject(priv->mon, backendAlias, true); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rc = -1; virDomainAuditMemory(vm, oldmem, newmem, "update", rc == 0); @@ -4380,7 +4358,7 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver, virDomainMemoryDefFree(mem); /* fix the balloon size */ - ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE)); + 
ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE)); /* decrease the mlock limit after memory unplug if necessary */ ignore_value(qemuDomainAdjustMaxMemLock(vm, false)); @@ -4451,9 +4429,9 @@ qemuDomainRemoveHostDevice(virQEMUDriverPtr driver, detachscsi = qemuBuildHostdevSCSIDetachPrepare(hostdev, priv->qemuCaps); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); qemuBlockStorageSourceAttachRollback(priv->mon, detachscsi); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } @@ -4564,9 +4542,9 @@ qemuDomainRemoveNetDevice(virQEMUDriverPtr driver, */ ignore_value(qemuInterfaceStopDevice(net)); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorRemoveNetdev(priv->mon, hostnet_name) < 0) { - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; virDomainAuditNet(vm, net, NULL, "detach", false); return -1; @@ -4582,7 +4560,7 @@ qemuDomainRemoveNetDevice(virQEMUDriverPtr driver, } } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp) @@ -4650,9 +4628,9 @@ qemuDomainRemoveChrDevice(virQEMUDriverPtr driver, return -1; if (monitor) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDetachCharDev(priv->mon, charAlias); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } @@ -4709,7 +4687,7 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, if (!(charAlias = qemuAliasChardevFromDevAlias(rng->info.alias))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorDelObject(priv->mon, objAlias, true) < 0) rc = -1; @@ -4719,7 +4697,7 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, qemuMonitorDetachCharDev(priv->mon, charAlias) < 0) rc = -1; - if (qemuDomainObjExitMonitor(driver, 
vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rng->backend == VIR_DOMAIN_RNG_BACKEND_EGD && @@ -4748,8 +4726,7 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, static int -qemuDomainRemoveShmemDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRemoveShmemDevice(virDomainObjPtr vm, virDomainShmemDefPtr shmem) { int rc; @@ -4767,14 +4744,14 @@ qemuDomainRemoveShmemDevice(virQEMUDriverPtr driver, memAlias = g_strdup_printf("shmmem-%s", shmem->info.alias); } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (shmem->server.enabled) rc = qemuMonitorDetachCharDev(priv->mon, charAlias); else rc = qemuMonitorDelObject(priv->mon, memAlias, true); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; virDomainAuditShmem(vm, shmem, "detach", rc == 0); @@ -4863,13 +4840,13 @@ qemuDomainRemoveRedirdevDevice(virQEMUDriverPtr driver, if (!(charAlias = qemuAliasChardevFromDevAlias(dev->info.alias))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); /* DeviceDel from Detach may remove chardev, * so we cannot rely on return status to delete TLS chardevs. 
*/ ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (qemuDomainDelChardevTLSObjects(driver, vm, dev->source, charAlias) < 0) @@ -5009,7 +4986,7 @@ qemuDomainRemoveDevice(virQEMUDriverPtr driver, return -1; break; case VIR_DOMAIN_DEVICE_SHMEM: - if (qemuDomainRemoveShmemDevice(driver, vm, dev->data.shmem) < 0) + if (qemuDomainRemoveShmemDevice(vm, dev->data.shmem) < 0) return -1; break; case VIR_DOMAIN_DEVICE_INPUT: @@ -5554,9 +5531,9 @@ qemuDomainDetachDeviceChr(virQEMUDriverPtr driver, if (guestfwd) { int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorRemoveNetdev(priv->mon, tmpChr->info.alias); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rc = -1; if (rc < 0) @@ -5915,8 +5892,7 @@ qemuDomainDetachDeviceLive(virDomainObjPtr vm, static int -qemuDomainRemoveVcpu(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRemoveVcpu(virDomainObjPtr vm, unsigned int vcpu) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5927,7 +5903,7 @@ qemuDomainRemoveVcpu(virQEMUDriverPtr driver, virErrorPtr save_error = NULL; size_t i; - if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0) return -1; /* validation requires us to set the expected state prior to calling it */ @@ -5961,8 +5937,7 @@ qemuDomainRemoveVcpu(virQEMUDriverPtr driver, void -qemuDomainRemoveVcpuAlias(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRemoveVcpuAlias(virDomainObjPtr vm, const char *alias) { virDomainVcpuDefPtr vcpu; @@ -5974,7 +5949,7 @@ qemuDomainRemoveVcpuAlias(virQEMUDriverPtr driver, vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu); if (STREQ_NULLABLE(alias, vcpupriv->alias)) { - qemuDomainRemoveVcpu(driver, vm, i); + qemuDomainRemoveVcpu(vm, i); return; } } @@ -6017,7 +5992,7 @@ 
qemuDomainHotplugDelVcpu(virQEMUDriverPtr driver, goto cleanup; } - if (qemuDomainRemoveVcpu(driver, vm, vcpu) < 0) + if (qemuDomainRemoveVcpu(vm, vcpu) < 0) goto cleanup; qemuDomainVcpuPersistOrder(vm->def); @@ -6056,7 +6031,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, goto cleanup; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (newhotplug) { rc = qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops); @@ -6065,7 +6040,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, rc = qemuMonitorSetCPU(qemuDomainGetMonitor(vm), vcpu, true); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; virDomainAuditVcpu(vm, oldvcpus, oldvcpus + nvcpus, "update", rc == 0); @@ -6077,7 +6052,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, if (newhotplug) vm->def->individualvcpus = true; - if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0) goto cleanup; /* validation requires us to set the expected state prior to calling it */ diff --git a/src/qemu/qemu_hotplug.h b/src/qemu/qemu_hotplug.h index 6287c5b5e8..51af92f840 100644 --- a/src/qemu/qemu_hotplug.h +++ b/src/qemu/qemu_hotplug.h @@ -31,14 +31,12 @@ int qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, virStorageSourcePtr newsrc, bool force); -void qemuDomainDelTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +void qemuDomainDelTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, const char *secAlias, const char *tlsAlias); -int qemuDomainAddTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAddTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virJSONValuePtr *secProps, virJSONValuePtr *tlsProps); @@ -52,8 +50,7 @@ int qemuDomainGetTLSObjects(virQEMUCapsPtr qemuCaps, virJSONValuePtr *tlsProps, virJSONValuePtr *secProps); -int qemuDomainAttachControllerDevice(virQEMUDriverPtr 
driver, - virDomainObjPtr vm, +int qemuDomainAttachControllerDevice(virDomainObjPtr vm, virDomainControllerDefPtr controller); int qemuDomainAttachDeviceDiskLive(virQEMUDriverPtr driver, virDomainObjPtr vm, @@ -81,26 +78,21 @@ int qemuDomainAttachMemory(virQEMUDriverPtr driver, int qemuDomainChangeGraphics(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainGraphicsDefPtr dev); -int qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeGraphicsPasswords(virDomainObjPtr vm, int type, virDomainGraphicsAuthDefPtr auth, const char *defaultPasswd, int asyncJob); -int qemuDomainChangeNet(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeNet(virDomainObjPtr vm, virDomainDeviceDefPtr dev); -int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeNetLinkState(virDomainObjPtr vm, virDomainNetDefPtr dev, int linkstate); -int qemuDomainAttachInputDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachInputDevice(virDomainObjPtr vm, virDomainInputDefPtr input); -int qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachVsockDevice(virDomainObjPtr vm, virDomainVsockDefPtr vsock); int qemuDomainAttachLease(virQEMUDriverPtr driver, @@ -118,8 +110,7 @@ int qemuDomainDetachDeviceLive(virDomainObjPtr vm, virQEMUDriverPtr driver, bool async); -void qemuDomainRemoveVcpuAlias(virQEMUDriverPtr driver, - virDomainObjPtr vm, +void qemuDomainRemoveVcpuAlias(virDomainObjPtr vm, const char *alias); int @@ -157,6 +148,5 @@ int qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); -int qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index a45f87137f..6b2978f745 100644 --- 
a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -82,21 +82,18 @@ VIR_ENUM_IMPL(virMigrationJobPhase, ); static int -qemuMigrationJobStart(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStart(virDomainObjPtr vm, qemuDomainAsyncJob job, unsigned long apiFlags) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; static void -qemuMigrationJobSetPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobSetPhase(virDomainObjPtr vm, virMigrationJobPhase phase) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); static void -qemuMigrationJobStartPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStartPhase(virDomainObjPtr vm, virMigrationJobPhase phase) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); @@ -110,8 +107,7 @@ qemuMigrationJobIsActive(virDomainObjPtr vm, ATTRIBUTE_NONNULL(1); static void -qemuMigrationJobFinish(virQEMUDriverPtr driver, - virDomainObjPtr obj) +qemuMigrationJobFinish(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); static void @@ -424,8 +420,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, devicename = diskAlias; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) goto cleanup; if (port == 0) { @@ -441,7 +436,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, if (qemuMonitorNBDServerAdd(priv->mon, devicename, exportname, true, NULL) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; } @@ -454,14 +449,13 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, return ret; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } static int -qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstStopNBDServer(virDomainObjPtr vm, qemuMigrationCookiePtr mig) 
{ qemuDomainObjPrivatePtr priv = vm->privateData; @@ -469,13 +463,12 @@ qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver, if (!mig->nbd) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) return -1; if (qemuMonitorNBDServerStop(priv->mon) < 0) VIR_WARN("Unable to stop NBD server"); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; virPortAllocatorRelease(priv->nbdPort); @@ -646,8 +639,7 @@ qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm, * -1 on error or when job failed and failNoJob is true. */ static int -qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDCopyCancelOne(virDomainObjPtr vm, virDomainDiskDefPtr disk, qemuBlockJobDataPtr job, bool failNoJob, @@ -669,12 +661,12 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, return 1; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rv = qemuMonitorBlockJobCancel(priv->mon, job->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; return 0; @@ -683,7 +675,6 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, /** * qemuMigrationSrcNBDCopyCancel: - * @driver: qemu driver * @vm: domain * @check: if true report an error when some of the mirrors fails * @@ -695,8 +686,7 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, * Returns 0 on success, -1 otherwise. 
*/ static int -qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDCopyCancel(virDomainObjPtr vm, bool check, qemuDomainAsyncJob asyncJob, virConnectPtr dconn) @@ -723,7 +713,7 @@ qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver, continue; } - rv = qemuMigrationSrcNBDCopyCancelOne(driver, vm, disk, job, + rv = qemuMigrationSrcNBDCopyCancelOne(vm, disk, job, check, asyncJob); if (rv != 0) { if (rv < 0) { @@ -766,7 +756,7 @@ qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver, if (!diskPriv->migrSource) continue; - qemuBlockStorageSourceDetachOneBlockdev(driver, vm, asyncJob, + qemuBlockStorageSourceDetachOneBlockdev(vm, asyncJob, diskPriv->migrSource); virObjectUnref(diskPriv->migrSource); diskPriv->migrSource = NULL; @@ -818,8 +808,7 @@ qemuMigrationSrcNBDStorageCopyBlockdevPrepareSource(virDomainDiskDefPtr disk, static int -qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDStorageCopyBlockdev(virDomainObjPtr vm, virDomainDiskDefPtr disk, const char *jobname, const char *sourcename, @@ -847,8 +836,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, false))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) return -1; mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data); @@ -861,7 +849,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, if (mon_ret != 0) qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || mon_ret < 0) return -1; diskPriv->migrSource = g_steal_pointer(©src); @@ -871,8 +859,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, static int -qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver, - virDomainObjPtr vm, 
+qemuMigrationSrcNBDStorageCopyDriveMirror(virDomainObjPtr vm, const char *diskAlias, const char *host, int port, @@ -890,15 +877,14 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver, diskAlias); } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) return -1; mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm), diskAlias, nbd_dest, "raw", mirror_speed, 0, 0, mirror_shallow, true); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || mon_ret < 0) return -1; return 0; @@ -906,8 +892,7 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver, static int -qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDStorageCopyOne(virDomainObjPtr vm, virDomainDiskDefPtr disk, const char *host, int port, @@ -946,15 +931,14 @@ qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver, if (flags & VIR_MIGRATE_TLS || virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) { - rc = qemuMigrationSrcNBDStorageCopyBlockdev(driver, vm, - disk, jobname, + rc = qemuMigrationSrcNBDStorageCopyBlockdev(vm, disk, jobname, sourcename, persistjob, host, port, mirror_speed, mirror_shallow, tlsAlias); } else { - rc = qemuMigrationSrcNBDStorageCopyDriveMirror(driver, vm, diskAlias, + rc = qemuMigrationSrcNBDStorageCopyDriveMirror(vm, diskAlias, host, port, mirror_speed, mirror_shallow); @@ -1037,7 +1021,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks)) continue; - if (qemuMigrationSrcNBDStorageCopyOne(driver, vm, disk, host, port, + if (qemuMigrationSrcNBDStorageCopyOne(vm, disk, host, port, mirror_speed, mirror_shallow, tlsAlias, flags) < 0) return -1; @@ -1070,7 +1054,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, return -1; } - 
qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->current); /* Okay, all disks are ready. Modify migrate_flags */ @@ -1492,8 +1476,7 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) int -qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationAnyFetchStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo, char **error) @@ -1502,12 +1485,12 @@ qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, qemuMonitorMigrationStats stats; int rv; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rv = qemuMonitorGetMigrationStats(priv->mon, &stats, error); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; jobInfo->stats.mig = stats; @@ -1546,8 +1529,7 @@ qemuMigrationJobName(virDomainObjPtr vm) static int -qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobCheckStatus(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -1559,7 +1541,7 @@ qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, if (!events || jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { - if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0) + if (qemuMigrationAnyFetchStats(vm, asyncJob, jobInfo, &error) < 0) return -1; } @@ -1615,8 +1597,7 @@ enum qemuMigrationCompletedFlags { * -2 something else failed, we need to cancel migration. 
*/ static int -qemuMigrationAnyCompleted(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationAnyCompleted(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virConnectPtr dconn, unsigned int flags) @@ -1626,7 +1607,7 @@ qemuMigrationAnyCompleted(virQEMUDriverPtr driver, qemuDomainJobInfoPtr jobInfo = jobPriv->current; int pauseReason; - if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0) + if (qemuMigrationJobCheckStatus(vm, asyncJob) < 0) goto error; /* This flag should only be set when run on src host */ @@ -1707,8 +1688,7 @@ qemuMigrationAnyCompleted(virQEMUDriverPtr driver, * QEMU reports failed migration. */ static int -qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virConnectPtr dconn, unsigned int flags) @@ -1721,7 +1701,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; - while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob, + while ((rv = qemuMigrationAnyCompleted(vm, asyncJob, dconn, flags)) != 1) { if (rv < 0) return rv; @@ -1743,7 +1723,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, } if (events) - ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL)); + ignore_value(qemuMigrationAnyFetchStats(vm, asyncJob, jobInfo, NULL)); qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobInfoUpdateDowntime(jobInfo); @@ -1760,8 +1740,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, static int -qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstWaitForCompletion(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, bool postcopy) { @@ -1777,7 +1756,7 @@ qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver, if (postcopy) flags = QEMU_MIGRATION_COMPLETED_POSTCOPY; - while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob, + while ((rv = qemuMigrationAnyCompleted(vm, 
asyncJob, NULL, flags)) != 1) { if (rv < 0 || virDomainObjWait(vm) < 0) return -1; @@ -1788,8 +1767,7 @@ qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver, static int -qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcGraphicsRelocate(virDomainObjPtr vm, qemuMigrationCookiePtr cookie, const char *graphicsuri) { @@ -1871,14 +1849,13 @@ qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver, goto cleanup; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress, port, tlsPort, tlsSubject); jobPriv->spiceMigration = !ret; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; } @@ -1963,8 +1940,7 @@ qemuMigrationDstGetURI(const char *migrateFrom, int -qemuMigrationDstRun(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstRun(virDomainObjPtr vm, const char *uri, qemuDomainAsyncJob asyncJob) { @@ -1973,7 +1949,7 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, VIR_DEBUG("Setting up incoming migration with URI %s", uri); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rv = qemuMonitorSetDBusVMStateIdList(priv->mon, @@ -1984,7 +1960,7 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, rv = qemuMonitorMigrateIncoming(priv->mon, uri); exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) { @@ -1992,7 +1968,7 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, return 0; } - if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0) + if (qemuMigrationDstWaitForCompletion(vm, asyncJob, false) < 0) return -1; return 0; @@ 
-2008,9 +1984,8 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, static void qemuMigrationSrcCleanup(virDomainObjPtr vm, virConnectPtr conn, - void *opaque) + void *opaque G_GNUC_UNUSED) { - virQEMUDriverPtr driver = opaque; qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; @@ -2030,17 +2005,17 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, switch ((virMigrationJobPhase) priv->job.phase) { case VIR_MIGRATION_PHASE_BEGIN3: /* just forget we were about to migrate */ - qemuDomainObjDiscardAsyncJob(driver, vm); + qemuDomainObjDiscardAsyncJob(vm); break; case VIR_MIGRATION_PHASE_PERFORM3_DONE: VIR_WARN("Migration of domain %s finished but we don't know if the" " domain was successfully started on destination or not", vm->def->name); - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); /* clear the job and let higher levels decide what to do */ - qemuDomainObjDiscardAsyncJob(driver, vm); + qemuDomainObjDiscardAsyncJob(vm); break; case VIR_MIGRATION_PHASE_PERFORM3: @@ -2091,7 +2066,7 @@ qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver, * change protection. 
*/ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) - qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_BEGIN3); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_BEGIN3); if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags)) return NULL; @@ -2233,12 +2208,12 @@ qemuMigrationSrcBegin(virConnectPtr conn, qemuDomainAsyncJob asyncJob; if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) { - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) goto cleanup; asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT; } else { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; asyncJob = QEMU_ASYNC_JOB_NONE; } @@ -2252,7 +2227,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, * We don't want to require them on the destination. */ if (!(flags & VIR_MIGRATE_OFFLINE) && - qemuProcessRefreshDisks(driver, vm, asyncJob) < 0) + qemuProcessRefreshDisks(vm, asyncJob) < 0) goto endjob; if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname, @@ -2281,9 +2256,9 @@ qemuMigrationSrcBegin(virConnectPtr conn, endjob: if (flags & VIR_MIGRATE_CHANGE_PROTECTION) - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); else - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); goto cleanup; } @@ -2308,7 +2283,7 @@ qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver, if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) return; - qemuDomainObjDiscardAsyncJob(driver, vm); + qemuDomainObjDiscardAsyncJob(vm); } static qemuProcessIncomingDefPtr @@ -2547,10 +2522,9 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0) goto cleanup; - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, - flags) < 0) + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_IN, flags) < 0) goto cleanup; - qemuMigrationJobSetPhase(driver, vm, 
VIR_MIGRATION_PHASE_PREPARE); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PREPARE); /* Domain starts inactive, even if the domain XML had an id field. */ vm->def->id = -1; @@ -2607,7 +2581,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, goto stopjob; } - if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_IN, migParams, mig->caps->automatic) < 0) goto stopjob; @@ -2624,7 +2598,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, goto stopjob; } - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_IN, migParams) < 0) goto stopjob; @@ -2661,7 +2635,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, } if (incoming->deferredURI && - qemuMigrationDstRun(driver, vm, incoming->deferredURI, + qemuMigrationDstRun(vm, incoming->deferredURI, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) goto stopjob; @@ -2731,7 +2705,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, return ret; stopjob: - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN, jobPriv->migParams, priv->job.apiFlags); if (stopProcess) { @@ -2743,7 +2717,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags); } - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); goto cleanup; } @@ -3011,7 +2985,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virCheckFlags(QEMU_MIGRATION_FLAGS, -1); - qemuMigrationJobSetPhase(driver, vm, retcode == 0 + qemuMigrationJobSetPhase(vm, retcode == 0 ? 
VIR_MIGRATION_PHASE_CONFIRM3 : VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED); @@ -3035,7 +3009,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, */ if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY && - qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobInfo, NULL) < 0) VIR_WARN("Could not refresh migration statistics"); @@ -3074,8 +3048,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); /* cancel any outstanding NBD jobs */ - qemuMigrationSrcNBDCopyCancel(driver, vm, false, - QEMU_ASYNC_JOB_MIGRATION_OUT, NULL); + qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_MIGRATION_OUT, NULL); virErrorRestore(&orig_err); @@ -3085,7 +3058,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, else qemuMigrationSrcRestoreDomainState(driver, vm); - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) @@ -3117,7 +3090,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, else phase = VIR_MIGRATION_PHASE_CONFIRM3; - qemuMigrationJobStartPhase(driver, vm, phase); + qemuMigrationJobStartPhase(vm, phase); virCloseCallbacksUnset(driver->closeCallbacks, vm, qemuMigrationSrcCleanup); @@ -3125,7 +3098,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, cookiein, cookieinlen, flags, cancelled); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); if (!virDomainObjIsActive(vm)) { if (!cancelled && ret == 0 && flags & VIR_MIGRATE_UNDEFINE_SOURCE) { virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm); @@ -3411,20 +3384,19 @@ qemuMigrationSrcConnect(virQEMUDriverPtr driver, static int -qemuMigrationSrcContinue(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcContinue(virDomainObjPtr vm, 
qemuMonitorMigrationStatus status, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorMigrateContinue(priv->mon, status); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -3443,18 +3415,18 @@ qemuMigrationSetDBusVMState(virQEMUDriverPtr driver, if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; rv = qemuMonitorSetDBusVMStateIdList(priv->mon, (const char **)priv->dbusVMStateIds); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rv = -1; return rv; } else { - if (qemuHotplugRemoveDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugRemoveDBusVMState(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; } @@ -3554,10 +3526,10 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (!mig) goto error; - if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0) + if (qemuMigrationSrcGraphicsRelocate(vm, mig, graphicsuri) < 0) VIR_WARN("unable to provide data for graphics client relocation"); - if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, migParams, mig->caps->automatic) < 0) goto error; @@ -3585,7 +3557,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, migrate_speed * 1024 * 1024) < 0) goto error; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, migParams) < 0) goto error; @@ -3632,8 +3604,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, goto error; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - 
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; if (priv->job.abortJob) { @@ -3689,7 +3660,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, break; } - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto error; /* From this point onwards we *must* call cancel to abort the @@ -3713,8 +3684,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (flags & VIR_MIGRATE_POSTCOPY) waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY; - rc = qemuMigrationSrcWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT, + rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn, waitFlags); if (rc == -2) { goto error; @@ -3736,7 +3706,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, } if (mig->nbd && - qemuMigrationSrcNBDCopyCancel(driver, vm, true, + qemuMigrationSrcNBDCopyCancel(vm, true, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn) < 0) goto error; @@ -3746,15 +3716,13 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, * end of the migration. 
*/ if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { - if (qemuMigrationSrcContinue(driver, vm, - QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, + if (qemuMigrationSrcContinue(vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER; - rc = qemuMigrationSrcWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT, + rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn, waitFlags); if (rc == -2) { goto error; @@ -3806,15 +3774,14 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (virDomainObjIsActive(vm)) { if (cancel && jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && - qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { + qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuMonitorMigrateCancel(priv->mon); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } /* cancel any outstanding NBD jobs */ if (mig && mig->nbd) - qemuMigrationSrcNBDCopyCancel(driver, vm, false, + qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn); @@ -3828,7 +3795,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, goto cleanup; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto error; } @@ -4063,7 +4030,7 @@ qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver, * until the migration is complete. */ VIR_DEBUG("Perform %p", sconn); - qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_PERFORM2); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM2); if (flags & VIR_MIGRATE_TUNNELLED) ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL, NULL, 0, NULL, NULL, @@ -4301,7 +4268,7 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver, * confirm migration completion. 
*/ VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri)); - qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_PERFORM3); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM3); VIR_FREE(cookiein); cookiein = g_steal_pointer(&cookieout); cookieinlen = cookieoutlen; @@ -4326,7 +4293,7 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver, if (ret < 0) { virErrorPreserveLast(&orig_err); } else { - qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_PERFORM3_DONE); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM3_DONE); } /* If Perform returns < 0, then we need to cancel the VM @@ -4666,8 +4633,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - flags) < 0) + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) goto cleanup; if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0) @@ -4690,7 +4656,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, migParams, flags, dname, resource, &v3proto); } else { - qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_PERFORM2); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM2); ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen, cookieout, cookieoutlen, flags, resource, NULL, NULL, 0, NULL, @@ -4721,12 +4687,12 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, * here */ if (!v3proto && ret < 0) - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); qemuMigrationSrcRestoreDomainState(driver, vm); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); if (!virDomainObjIsActive(vm) && ret == 0) { if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) { virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm); 
@@ -4768,14 +4734,14 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, /* If we didn't start the job in the begin phase, start it now. */ if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) { - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) return ret; } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) { return ret; } - qemuMigrationJobStartPhase(driver, vm, VIR_MIGRATION_PHASE_PERFORM3); + qemuMigrationJobStartPhase(vm, VIR_MIGRATION_PHASE_PERFORM3); virCloseCallbacksUnset(driver->closeCallbacks, vm, qemuMigrationSrcCleanup); @@ -4789,7 +4755,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, goto endjob; } - qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_PERFORM3_DONE); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM3_DONE); if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn, qemuMigrationSrcCleanup) < 0) @@ -4797,9 +4763,9 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, endjob: if (ret < 0) { - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); } else { qemuMigrationJobContinue(vm); } @@ -5021,7 +4987,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, ignore_value(virTimeMillisNow(&timeReceived)); - qemuMigrationJobStartPhase(driver, vm, + qemuMigrationJobStartPhase(vm, v3proto ? 
VIR_MIGRATION_PHASE_FINISH3 : VIR_MIGRATION_PHASE_FINISH2); @@ -5050,7 +5016,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* Check for a possible error on the monitor in case Finish was called * earlier than monitor EOF handler got a chance to process the error */ - qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN); + qemuDomainCheckMonitor(vm, QEMU_ASYNC_JOB_MIGRATION_IN); goto endjob; } @@ -5067,7 +5033,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0) VIR_WARN("unable to provide network data for relocation"); - if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0) + if (qemuMigrationDstStopNBDServer(vm, mig) < 0) goto endjob; if (qemuRefreshVirtioChannelState(driver, vm, @@ -5099,8 +5065,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* We need to wait for QEMU to process all data sent by the source * before starting guest CPUs. */ - if (qemuMigrationDstWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN, !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) { /* There's not much we can do for v2 protocol since the * original domain on the source host is already gone. 
@@ -5168,8 +5133,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, } if (inPostCopy) { - if (qemuMigrationDstWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN, false) < 0) { goto endjob; } @@ -5250,10 +5214,10 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); } - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN, jobPriv->migParams, priv->job.apiFlags); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); if (!virDomainObjIsActive(vm)) qemuDomainRemoveInactiveJob(driver, vm); @@ -5306,16 +5270,16 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0) return -1; - if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0) + if (qemuMigrationParamsApply(vm, asyncJob, migParams) < 0) return -1; priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX; } else { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuMonitorSetMigrationSpeed(priv->mon, QEMU_DOMAIN_MIG_BANDWIDTH_MAX); priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } } @@ -5338,7 +5302,7 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, compressor ? 
pipeFD[1] : fd) < 0) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; if (!compressor) { @@ -5353,11 +5317,11 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, if (virSetCloseExec(pipeFD[1]) < 0) { virReportSystemError(errno, "%s", _("Unable to set cloexec flag")); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } if (virCommandRunAsync(compressor, NULL) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } rc = qemuMonitorMigrateToFd(priv->mon, @@ -5367,21 +5331,21 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, VIR_CLOSE(pipeFD[1]) < 0) VIR_WARN("failed to close intermediate pipe"); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) goto cleanup; - rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0); + rc = qemuMigrationSrcWaitForCompletion(vm, asyncJob, NULL, 0); if (rc < 0) { if (rc == -2) { virErrorPreserveLast(&orig_err); virCommandAbort(compressor); if (virDomainObjIsActive(vm) && - qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuMonitorMigrateCancel(priv->mon); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } goto cleanup; @@ -5403,12 +5367,12 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, if (qemuMigrationParamsSetULL(migParams, QEMU_MIGRATION_PARAM_MAX_BANDWIDTH, saveMigBandwidth * 1024 * 1024) == 0) - ignore_value(qemuMigrationParamsApply(driver, vm, asyncJob, + ignore_value(qemuMigrationParamsApply(vm, asyncJob, migParams)); } else { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) 
== 0) { qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } priv->migMaxBandwidth = saveMigBandwidth; @@ -5428,8 +5392,7 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, int -qemuMigrationSrcCancel(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuMigrationSrcCancel(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; bool storage = false; @@ -5438,9 +5401,9 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver, VIR_DEBUG("Canceling unfinished outgoing migration of domain %s", vm->def->name); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorMigrateCancel(priv->mon)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; for (i = 0; i < vm->def->ndisks; i++) { @@ -5461,7 +5424,7 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver, } if (storage && - qemuMigrationSrcNBDCopyCancel(driver, vm, false, + qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_NONE, NULL) < 0) return -1; @@ -5470,8 +5433,7 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver, static int -qemuMigrationJobStart(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStart(virDomainObjPtr vm, qemuDomainAsyncJob job, unsigned long apiFlags) { @@ -5490,7 +5452,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, JOB_MASK(QEMU_JOB_MIGRATION_OP); } - if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) + if (qemuDomainObjBeginAsyncJob(vm, job, op, apiFlags) < 0) return -1; jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; @@ -5500,8 +5462,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, } static void -qemuMigrationJobSetPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobSetPhase(virDomainObjPtr vm, virMigrationJobPhase phase) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5513,15 +5474,14 @@ 
qemuMigrationJobSetPhase(virQEMUDriverPtr driver, return; } - qemuDomainObjSetJobPhase(driver, vm, phase); + qemuDomainObjSetJobPhase(vm, phase); } static void -qemuMigrationJobStartPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStartPhase(virDomainObjPtr vm, virMigrationJobPhase phase) { - qemuMigrationJobSetPhase(driver, vm, phase); + qemuMigrationJobSetPhase(vm, phase); } static void @@ -5551,9 +5511,9 @@ qemuMigrationJobIsActive(virDomainObjPtr vm, } static void -qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm) +qemuMigrationJobFinish(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); } @@ -5610,8 +5570,7 @@ qemuMigrationDstErrorReport(virQEMUDriverPtr driver, int -qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo) { @@ -5632,12 +5591,12 @@ qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver, if (!nbd) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon, false); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo) + if (qemuDomainObjExitMonitor(vm) < 0 || !blockinfo) return -1; memset(stats, 0, sizeof(*stats)); diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index b05f5254b4..8f5e2d0f81 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -195,12 +195,10 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; int -qemuMigrationSrcCancel(virQEMUDriverPtr driver, - virDomainObjPtr vm); +qemuMigrationSrcCancel(virDomainObjPtr vm); int -qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationAnyFetchStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, 
qemuDomainJobInfoPtr jobInfo, char **error); @@ -226,8 +224,7 @@ qemuMigrationDstGetURI(const char *migrateFrom, int migrateFd); int -qemuMigrationDstRun(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstRun(virDomainObjPtr vm, const char *uri, qemuDomainAsyncJob asyncJob); @@ -236,7 +233,6 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver, virDomainObjPtr vm); int -qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo); diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index 6cf1c22812..68f4735bc7 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -450,7 +450,6 @@ qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig, static int qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig, - virQEMUDriverPtr driver, virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -473,13 +472,13 @@ qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig, mig->nbd->disks = g_new0(struct qemuMigrationCookieNBDDisk, vm->def->ndisks); mig->nbd->ndisks = 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, priv->job.asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, priv->job.asyncJob) < 0) return -1; if (blockdev) rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats); else rc = qemuMonitorBlockStatsUpdateCapacity(priv->mon, stats, false); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; for (i = 0; i < vm->def->ndisks; i++) { @@ -1421,7 +1420,7 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, } if ((flags & QEMU_MIGRATION_COOKIE_NBD) && - qemuMigrationCookieAddNBD(mig, driver, dom) < 0) + qemuMigrationCookieAddNBD(mig, dom) < 0) return -1; if (flags & QEMU_MIGRATION_COOKIE_STATS && diff --git a/src/qemu/qemu_migration_params.c 
b/src/qemu/qemu_migration_params.c index 231a8a2ee8..12f94098c5 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -786,7 +786,6 @@ qemuMigrationCapsToJSON(virBitmapPtr caps, /** * qemuMigrationParamsApply - * @driver: qemu driver * @vm: domain object * @asyncJob: migration job * @migParams: migration parameters to send to QEMU @@ -796,8 +795,7 @@ qemuMigrationCapsToJSON(virBitmapPtr caps, * Returns 0 on success, -1 on failure. */ int -qemuMigrationParamsApply(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsApply(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams) { @@ -809,7 +807,7 @@ qemuMigrationParamsApply(virQEMUDriverPtr driver, int ret = -1; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (asyncJob == QEMU_ASYNC_JOB_NONE) { @@ -857,7 +855,7 @@ qemuMigrationParamsApply(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (xbzrleCacheSize_old) @@ -958,9 +956,9 @@ qemuMigrationParamsEnableTLS(virQEMUDriverPtr driver, * This should prevent any issues just in case some cleanup wasn't * properly completed (both src and dst use the same alias) or * some other error path between now and perform . 
*/ - qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, *tlsAlias); + qemuDomainDelTLSObjects(vm, asyncJob, secAlias, *tlsAlias); - if (qemuDomainAddTLSObjects(driver, vm, asyncJob, &secProps, &tlsProps) < 0) + if (qemuDomainAddTLSObjects(vm, asyncJob, &secProps, &tlsProps) < 0) return -1; if (qemuMigrationParamsSetString(migParams, @@ -1009,7 +1007,6 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, /* qemuMigrationParamsResetTLS - * @driver: pointer to qemu driver * @vm: domain object * @asyncJob: migration job to join * @apiFlags: API flags used to start the migration @@ -1018,8 +1015,7 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, * security objects and free the secinfo */ static void -qemuMigrationParamsResetTLS(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsResetTLS(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr origParams, unsigned long apiFlags) @@ -1036,14 +1032,13 @@ qemuMigrationParamsResetTLS(virQEMUDriverPtr driver, tlsAlias = qemuAliasTLSObjFromSrcAlias(QEMU_MIGRATION_TLS_ALIAS_BASE); secAlias = qemuAliasForSecret(QEMU_MIGRATION_TLS_ALIAS_BASE, NULL); - qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, tlsAlias); + qemuDomainDelTLSObjects(vm, asyncJob, secAlias, tlsAlias); g_clear_pointer(&QEMU_DOMAIN_PRIVATE(vm)->migSecinfo, qemuDomainSecretInfoFree); } int -qemuMigrationParamsFetch(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsFetch(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr *migParams) { @@ -1053,12 +1048,12 @@ qemuMigrationParamsFetch(virQEMUDriverPtr driver, *migParams = NULL; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetMigrationParams(priv->mon, &jsonParams); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; if (!(*migParams = qemuMigrationParamsFromJSON(jsonParams))) @@ -1112,8 +1107,7 @@ 
qemuMigrationParamsGetULL(qemuMigrationParamsPtr migParams, * are unsupported by QEMU. */ int -qemuMigrationParamsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsCheck(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams, virBitmapPtr remoteCaps) @@ -1173,7 +1167,7 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver, * to ask QEMU for their current settings. */ - return qemuMigrationParamsFetch(driver, vm, asyncJob, &jobPriv->migParams); + return qemuMigrationParamsFetch(vm, asyncJob, &jobPriv->migParams); } @@ -1184,8 +1178,7 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver, * migration (save, managedsave, snapshots, dump) will not try to use them. */ void -qemuMigrationParamsReset(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsReset(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr origParams, unsigned long apiFlags) @@ -1200,10 +1193,10 @@ qemuMigrationParamsReset(virQEMUDriverPtr driver, if (!virDomainObjIsActive(vm) || !origParams) goto cleanup; - if (qemuMigrationParamsApply(driver, vm, asyncJob, origParams) < 0) + if (qemuMigrationParamsApply(vm, asyncJob, origParams) < 0) goto cleanup; - qemuMigrationParamsResetTLS(driver, vm, asyncJob, origParams, apiFlags); + qemuMigrationParamsResetTLS(vm, asyncJob, origParams, apiFlags); cleanup: virErrorRestore(&err); @@ -1341,8 +1334,7 @@ qemuMigrationParamsParse(xmlXPathContextPtr ctxt, int -qemuMigrationCapsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationCapsCheck(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -1352,12 +1344,12 @@ qemuMigrationCapsCheck(virQEMUDriverPtr driver, char **capStr; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetMigrationCapabilities(priv->mon, &caps); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 
0) return -1; if (!caps) @@ -1388,13 +1380,13 @@ qemuMigrationCapsCheck(virQEMUDriverPtr driver, if (!(json = qemuMigrationCapsToJSON(migEvent, migEvent))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorSetMigrationCapabilities(priv->mon, json); json = NULL; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc < 0) { diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 9aea24725f..231f4db90b 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -95,8 +95,7 @@ qemuMigrationParamsFree(qemuMigrationParamsPtr migParams); G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuMigrationParams, qemuMigrationParamsFree); int -qemuMigrationParamsApply(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsApply(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams); @@ -114,8 +113,7 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, qemuMigrationParamsPtr migParams); int -qemuMigrationParamsFetch(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsFetch(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr *migParams); @@ -130,15 +128,13 @@ qemuMigrationParamsGetULL(qemuMigrationParamsPtr migParams, unsigned long long *value); int -qemuMigrationParamsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsCheck(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams, virBitmapPtr remoteCaps); void -qemuMigrationParamsReset(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsReset(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr origParams, unsigned long apiFlags); @@ -152,8 +148,7 @@ qemuMigrationParamsParse(xmlXPathContextPtr ctxt, qemuMigrationParamsPtr *migParams); int -qemuMigrationCapsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, 
+qemuMigrationCapsCheck(virDomainObjPtr vm, int asyncJob); bool diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index aa36264b6c..5eadcd1a6c 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -422,7 +422,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, if (vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY || vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -436,7 +436,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainAuditStop(vm, "destroyed"); qemuDomainRemoveInactive(driver, vm); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } ret = 0; @@ -467,7 +467,7 @@ qemuProcessFakeReboot(void *opaque) VIR_DEBUG("vm=%p", vm); virObjectLock(vm); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -476,10 +476,10 @@ qemuProcessFakeReboot(void *opaque) goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSystemReset(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) @@ -505,7 +505,7 @@ qemuProcessFakeReboot(void *opaque) ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: priv->pausedShutdown = false; @@ -1948,18 +1948,17 @@ qemuProcessMonitorLogFree(void *opaque) static int -qemuProcessInitMonitor(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessInitMonitor(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorSetCapabilities(QEMU_DOMAIN_PRIVATE(vm)->mon); - if 
(qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -2018,10 +2017,10 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob, return -1; } - if (qemuProcessInitMonitor(driver, vm, asyncJob) < 0) + if (qemuProcessInitMonitor(vm, asyncJob) < 0) return -1; - if (qemuMigrationCapsCheck(driver, vm, asyncJob) < 0) + if (qemuMigrationCapsCheck(vm, asyncJob) < 0) return -1; return 0; @@ -2256,11 +2255,11 @@ qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, virHashTablePtr info = NULL; int ret = -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ret = qemuMonitorGetChardevInfo(priv->mon, &info); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -2301,8 +2300,7 @@ qemuProcessRefreshPRManagerState(virDomainObjPtr vm, static int -qemuRefreshPRManagerState(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuRefreshPRManagerState(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; virHashTablePtr info = NULL; @@ -2312,9 +2310,9 @@ qemuRefreshPRManagerState(virQEMUDriverPtr driver, !qemuDomainDefHasManagedPR(vm)) return 0; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetPRManagerInfo(priv->mon, &info); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -2329,8 +2327,7 @@ qemuRefreshPRManagerState(virQEMUDriverPtr driver, static void -qemuRefreshRTC(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuRefreshRTC(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; time_t now, then; @@ -2342,10 +2339,10 @@ qemuRefreshRTC(virQEMUDriverPtr driver, return; memset(&thenbits, 0, sizeof(thenbits)); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); now = time(NULL); rv = 
qemuMonitorGetRTCTime(priv->mon, &thenbits); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rv = -1; if (rv < 0) @@ -2366,8 +2363,7 @@ qemuRefreshRTC(virQEMUDriverPtr driver, } int -qemuProcessRefreshBalloonState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessRefreshBalloonState(virDomainObjPtr vm, int asyncJob) { unsigned long long balloon; @@ -2380,11 +2376,11 @@ qemuProcessRefreshBalloonState(virQEMUDriverPtr driver, return 0; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetBalloonInfo(qemuDomainGetMonitor(vm), &balloon); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; vm->def->mem.cur_balloon = balloon; @@ -2418,11 +2414,11 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver, * reliable if it's available. * Note that the monitor itself can be on a pty, so we still need to try the * log output method. 
*/ - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ret = qemuMonitorGetChardevInfo(priv->mon, &info); VIR_DEBUG("qemuMonitorGetChardevInfo returned %i", ret); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret == 0) { @@ -2446,8 +2442,7 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver, static int -qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessDetectIOThreadPIDs(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -2462,10 +2457,10 @@ qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver, } /* Get the list of IOThreads from qemu */ - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (niothreads < 0) goto cleanup; @@ -2590,8 +2585,7 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm G_GNUC_UNUSED) /* set link states to down on interfaces at qemu start */ static int -qemuProcessSetLinkStates(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetLinkStates(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -2600,7 +2594,7 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, int ret = -1; int rv; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; for (i = 0; i < def->nnets; i++) { @@ -2628,7 +2622,7 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; } @@ -2979,14 +2973,12 @@ qemuProcessInitPasswords(virQEMUDriverPtr driver, for (i = 0; i < 
vm->def->ngraphics; ++i) { virDomainGraphicsDefPtr graphics = vm->def->graphics[i]; if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) { - ret = qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_VNC, + ret = qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC, &graphics->data.vnc.auth, cfg->vncPassword, asyncJob); } else if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) { - ret = qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_SPICE, + ret = qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE, &graphics->data.spice.auth, cfg->spicePassword, asyncJob); @@ -3058,7 +3050,7 @@ qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver, virDomainVideoDefPtr video = NULL; g_autoptr(virQEMUDriverConfig) cfg = NULL; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; for (i = 0; i < vm->def->nvideos; i++) { @@ -3110,7 +3102,7 @@ qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver, } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; cfg = virQEMUDriverGetConfig(driver); @@ -3119,7 +3111,7 @@ qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver, return ret; error: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); return -1; } @@ -3238,11 +3230,11 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, priv->runningReason = reason; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto release; ret = qemuMonitorStartCPUs(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -3277,11 +3269,11 @@ int qemuProcessStopCPUs(virQEMUDriverPtr driver, priv->pausedReason = reason; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if 
(qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ret = qemuMonitorStopCPUs(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -3361,7 +3353,7 @@ qemuProcessFiltersInstantiate(virDomainDefPtr def) } static int -qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm) +qemuProcessUpdateState(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; virDomainState state; @@ -3373,9 +3365,9 @@ qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm) g_autofree char *msg = NULL; int ret; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetStatus(priv->mon, &running, &reason); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (ret < 0) @@ -3480,7 +3472,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, break; } - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE, jobPriv->migParams, job->apiFlags); return 0; } @@ -3524,7 +3516,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } else { VIR_DEBUG("Cancelling unfinished migration of domain %s", vm->def->name); - if (qemuMigrationSrcCancel(driver, vm) < 0) { + if (qemuMigrationSrcCancel(vm) < 0) { VIR_WARN("Could not cancel ongoing migration of domain %s", vm->def->name); } @@ -3576,7 +3568,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } } - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE, jobPriv->migParams, job->apiFlags); return 0; } @@ -3611,9 +3603,9 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, case QEMU_ASYNC_JOB_SAVE: case QEMU_ASYNC_JOB_DUMP: case QEMU_ASYNC_JOB_SNAPSHOT: - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorMigrateCancel(priv->mon)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + 
if (qemuDomainObjExitMonitor(vm) < 0) return -1; /* resume the domain but only if it was paused as a result of * running a migration-to-file operation. Although we are @@ -3722,7 +3714,7 @@ qemuProcessUpdateDevices(virQEMUDriverPtr driver, old = priv->qemuDevices; priv->qemuDevices = NULL; - if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; qemuDevices = (const char **)priv->qemuDevices; @@ -4197,8 +4189,7 @@ qemuProcessTranslateCPUFeatures(const char *name, static int -qemuProcessFetchGuestCPU(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessFetchGuestCPU(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virCPUDataPtr *enabled, virCPUDataPtr *disabled) @@ -4217,7 +4208,7 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver, if (!generic && !ARCH_IS_X86(vm->def->os.arch)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (generic) { @@ -4229,7 +4220,7 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver, rc = qemuMonitorGetGuestCPUx86(priv->mon, &dataEnabled, &dataDisabled); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc == -1) @@ -4304,15 +4295,14 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm, static int -qemuProcessUpdateAndVerifyCPU(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessUpdateAndVerifyCPU(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { virCPUDataPtr cpu = NULL; virCPUDataPtr disabled = NULL; int ret = -1; - if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0) + if (qemuProcessFetchGuestCPU(vm, asyncJob, &cpu, &disabled) < 0) goto cleanup; if (qemuProcessVerifyCPU(vm, cpu) < 0) @@ -4331,8 +4321,7 @@ qemuProcessUpdateAndVerifyCPU(virQEMUDriverPtr driver, static int -qemuProcessFetchCPUDefinitions(virQEMUDriverPtr driver, - virDomainObjPtr vm, 
+qemuProcessFetchCPUDefinitions(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virDomainCapsCPUModelsPtr *cpuModels) { @@ -4340,12 +4329,12 @@ qemuProcessFetchCPUDefinitions(virQEMUDriverPtr driver, g_autoptr(virDomainCapsCPUModels) models = NULL; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = virQEMUCapsFetchCPUModels(priv->mon, vm->def->os.arch, &models); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; *cpuModels = g_steal_pointer(&models); @@ -4354,8 +4343,7 @@ qemuProcessFetchCPUDefinitions(virQEMUDriverPtr driver, static int -qemuProcessUpdateCPU(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessUpdateCPU(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { g_autoptr(virCPUData) cpu = NULL; @@ -4367,13 +4355,13 @@ qemuProcessUpdateCPU(virQEMUDriverPtr driver, */ vm->def->cpu->fallback = VIR_CPU_FALLBACK_ALLOW; - if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0) + if (qemuProcessFetchGuestCPU(vm, asyncJob, &cpu, &disabled) < 0) return -1; if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0) return -1; - if (qemuProcessFetchCPUDefinitions(driver, vm, asyncJob, &models) < 0 || + if (qemuProcessFetchCPUDefinitions(vm, asyncJob, &models) < 0 || virCPUTranslate(vm->def->os.arch, vm->def->cpu, models) < 0) return -1; @@ -4579,12 +4567,11 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps, * parameter between qemuProcessBeginJob and qemuProcessEndJob. 
*/ int -qemuProcessBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessBeginJob(virDomainObjPtr vm, virDomainJobOperation operation, unsigned long apiFlags) { - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_START, operation, apiFlags) < 0) return -1; @@ -4594,10 +4581,9 @@ qemuProcessBeginJob(virQEMUDriverPtr driver, void -qemuProcessEndJob(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuProcessEndJob(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); } @@ -5054,8 +5040,7 @@ qemuProcessSetupRawIO(virQEMUDriverPtr driver, static int -qemuProcessSetupBalloon(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetupBalloon(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { unsigned long long balloon = vm->def->mem.cur_balloon; @@ -5065,7 +5050,7 @@ qemuProcessSetupBalloon(virQEMUDriverPtr driver, if (!virDomainDefHasMemballoon(vm->def)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (vm->def->memballoon->period) @@ -5077,7 +5062,7 @@ qemuProcessSetupBalloon(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; } @@ -5920,8 +5905,7 @@ qemuProcessVcpusSortOrder(const void *a, static int -qemuProcessSetupHotpluggableVcpus(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetupHotpluggableVcpus(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def); @@ -5967,13 +5951,13 @@ qemuProcessSetupHotpluggableVcpus(virQEMUDriverPtr driver, if (!(vcpuprops = qemuBuildHotpluggableCPUProps(vcpu))) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = 
qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops); vcpuprops = NULL; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) @@ -6604,8 +6588,7 @@ qemuProcessGenID(virDomainObjPtr vm, * Same hack is done in qemuDomainAttachDiskGeneric. */ static int -qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetupDiskThrottlingBlockdev(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -6617,7 +6600,7 @@ qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriverPtr driver, VIR_DEBUG("Setting up disk throttling for -blockdev via block_set_io_throttle"); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; for (i = 0; i < vm->def->ndisks; i++) { @@ -6640,7 +6623,7 @@ qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; } @@ -6924,23 +6907,23 @@ qemuProcessLaunch(virConnectPtr conn, goto cleanup; VIR_DEBUG("Verifying and updating provided guest CPU"); - if (qemuProcessUpdateAndVerifyCPU(driver, vm, asyncJob) < 0) + if (qemuProcessUpdateAndVerifyCPU(vm, asyncJob) < 0) goto cleanup; VIR_DEBUG("setting up hotpluggable cpus"); if (qemuDomainHasHotpluggableStartupVcpus(vm->def)) { - if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, asyncJob, false) < 0) goto cleanup; if (qemuProcessValidateHotpluggableVcpus(vm->def) < 0) goto cleanup; - if (qemuProcessSetupHotpluggableVcpus(driver, vm, asyncJob) < 0) + if (qemuProcessSetupHotpluggableVcpus(vm, asyncJob) < 0) goto cleanup; } VIR_DEBUG("Refreshing VCPU info"); - if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, asyncJob, false) < 0) goto cleanup; if 
(qemuDomainValidateVcpuInfo(vm) < 0) @@ -6949,7 +6932,7 @@ qemuProcessLaunch(virConnectPtr conn, qemuDomainVcpuPersistOrder(vm->def); VIR_DEBUG("Detecting IOThread PIDs"); - if (qemuProcessDetectIOThreadPIDs(driver, vm, asyncJob) < 0) + if (qemuProcessDetectIOThreadPIDs(vm, asyncJob) < 0) goto cleanup; VIR_DEBUG("Setting global CPU cgroup (if required)"); @@ -6979,21 +6962,21 @@ qemuProcessLaunch(virConnectPtr conn, /* qemu doesn't support setting this on the command line, so * enter the monitor */ VIR_DEBUG("Setting network link states"); - if (qemuProcessSetLinkStates(driver, vm, asyncJob) < 0) + if (qemuProcessSetLinkStates(vm, asyncJob) < 0) goto cleanup; VIR_DEBUG("Setting initial memory amount"); - if (qemuProcessSetupBalloon(driver, vm, asyncJob) < 0) + if (qemuProcessSetupBalloon(vm, asyncJob) < 0) goto cleanup; - if (qemuProcessSetupDiskThrottlingBlockdev(driver, vm, asyncJob) < 0) + if (qemuProcessSetupDiskThrottlingBlockdev(vm, asyncJob) < 0) goto cleanup; /* Since CPUs were not started yet, the balloon could not return the memory * to the host and thus cur_balloon needs to be updated so that GetXMLdesc * and friends return the correct size in case they can't grab the job */ if (!incoming && !snapshot && - qemuProcessRefreshBalloonState(driver, vm, asyncJob) < 0) + qemuProcessRefreshBalloonState(vm, asyncJob) < 0) goto cleanup; if (flags & VIR_QEMU_PROCESS_START_AUTODESTROY && @@ -7026,11 +7009,11 @@ qemuProcessRefreshState(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; VIR_DEBUG("Fetching list of active devices"); - if (qemuDomainUpdateDeviceList(driver, vm, asyncJob) < 0) + if (qemuDomainUpdateDeviceList(vm, asyncJob) < 0) return -1; VIR_DEBUG("Updating info of memory devices"); - if (qemuDomainUpdateMemoryDeviceInfo(driver, vm, asyncJob) < 0) + if (qemuDomainUpdateMemoryDeviceInfo(vm, asyncJob) < 0) return -1; VIR_DEBUG("Detecting actual memory size for video device"); @@ -7038,10 +7021,10 @@ 
qemuProcessRefreshState(virQEMUDriverPtr driver, return -1; VIR_DEBUG("Updating disk data"); - if (qemuProcessRefreshDisks(driver, vm, asyncJob) < 0) + if (qemuProcessRefreshDisks(vm, asyncJob) < 0) return -1; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && - qemuBlockNodeNamesDetect(driver, vm, asyncJob) < 0) + qemuBlockNodeNamesDetect(vm, asyncJob) < 0) return -1; return 0; @@ -7160,7 +7143,7 @@ qemuProcessStart(virConnectPtr conn, if (incoming) { if (incoming->deferredURI && - qemuMigrationDstRun(driver, vm, incoming->deferredURI, asyncJob) < 0) + qemuMigrationDstRun(vm, incoming->deferredURI, asyncJob) < 0) goto stop; } else { /* Refresh state of devices from QEMU. During migration this happens @@ -7288,8 +7271,7 @@ qemuProcessKill(virDomainObjPtr vm, unsigned int flags) * qemuProcessStop. */ int -qemuProcessBeginStopJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessBeginStopJob(virDomainObjPtr vm, qemuDomainJob job, bool forceKill) { @@ -7309,7 +7291,7 @@ qemuProcessBeginStopJob(virQEMUDriverPtr driver, /* Wake up anything waiting on domain condition */ virDomainObjBroadcast(vm); - if (qemuDomainObjBeginJob(driver, vm, job) < 0) + if (qemuDomainObjBeginJob(vm, job) < 0) goto cleanup; ret = 0; @@ -7350,7 +7332,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); if (asyncJob != QEMU_ASYNC_JOB_NONE) { - if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0) + if (qemuDomainObjBeginNestedJob(vm, asyncJob) < 0) goto cleanup; } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE && priv->job.asyncOwner == virThreadSelfID() && @@ -7656,7 +7638,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, endjob: if (asyncJob != QEMU_ASYNC_JOB_NONE) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virErrorRestore(&orig_err); @@ -7681,12 +7663,12 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, if (priv->job.asyncJob) { VIR_DEBUG("vm=%s has long-term job active, cancelling", dom->def->name); - 
qemuDomainObjDiscardAsyncJob(driver, dom); + qemuDomainObjDiscardAsyncJob(dom); } VIR_DEBUG("Killing domain"); - if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0) + if (qemuProcessBeginStopJob(dom, QEMU_JOB_DESTROY, true) < 0) return; qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED, @@ -7699,7 +7681,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, qemuDomainRemoveInactive(driver, dom); - qemuDomainObjEndJob(driver, dom); + qemuDomainObjEndJob(dom); virObjectEventStateQueue(driver->domainEventState, event); } @@ -7732,8 +7714,7 @@ bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver, int -qemuProcessRefreshDisks(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessRefreshDisks(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -7742,9 +7723,9 @@ qemuProcessRefreshDisks(virQEMUDriverPtr driver, int ret = -1; size_t i; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { table = qemuMonitorGetBlockInfo(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; } @@ -7789,8 +7770,7 @@ qemuProcessRefreshDisks(virQEMUDriverPtr driver, static int -qemuProcessRefreshCPUMigratability(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessRefreshCPUMigratability(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -7809,12 +7789,12 @@ qemuProcessRefreshCPUMigratability(virQEMUDriverPtr driver, if (!ARCH_IS_X86(def->os.arch)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetCPUMigratable(priv->mon, &migratable); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; if (rc == 1) @@ -7849,7 +7829,7 @@ 
qemuProcessRefreshCPU(virQEMUDriverPtr driver, if (!vm->def->cpu) return 0; - if (qemuProcessRefreshCPUMigratability(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshCPUMigratability(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; if (!(host = virQEMUDriverGetHostCPU(driver))) { @@ -7884,7 +7864,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver, if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0) return -1; - if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessUpdateCPU(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; } else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) { /* We only try to fix CPUs when the libvirt/QEMU combo used to start @@ -7963,15 +7943,14 @@ qemuProcessRefreshLegacyBlockjob(void *payload, static int -qemuProcessRefreshLegacyBlockjobs(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuProcessRefreshLegacyBlockjobs(virDomainObjPtr vm) { virHashTablePtr blockJobs = NULL; int ret = -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); blockJobs = qemuMonitorGetAllBlockJobInfo(qemuDomainGetMonitor(vm), true); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockJobs) + if (qemuDomainObjExitMonitor(vm) < 0 || !blockJobs) goto cleanup; if (virHashForEach(blockJobs, qemuProcessRefreshLegacyBlockjob, vm) < 0) @@ -7986,15 +7965,14 @@ qemuProcessRefreshLegacyBlockjobs(virQEMUDriverPtr driver, static int -qemuProcessRefreshBlockjobs(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuProcessRefreshBlockjobs(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) - return qemuBlockJobRefreshJobs(driver, vm); + return qemuBlockJobRefreshJobs(vm); else - return qemuProcessRefreshLegacyBlockjobs(driver, vm); + return qemuProcessRefreshLegacyBlockjobs(vm); } @@ -8049,7 +8027,7 @@ qemuProcessReconnect(void *opaque) cfg = virQEMUDriverGetConfig(driver); priv = obj->privateData; - if 
(qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(obj, QEMU_JOB_MODIFY) < 0) goto error; jobStarted = true; @@ -8133,7 +8111,7 @@ qemuProcessReconnect(void *opaque) goto error; } - if (qemuProcessUpdateState(driver, obj) < 0) + if (qemuProcessUpdateState(obj) < 0) goto error; state = virDomainObjGetState(obj, &reason); @@ -8182,12 +8160,12 @@ qemuProcessReconnect(void *opaque) if (qemuProcessRefreshCPU(driver, obj) < 0) goto error; - if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0) + if (qemuDomainRefreshVcpuInfo(obj, QEMU_ASYNC_JOB_NONE, true) < 0) goto error; qemuDomainVcpuPersistOrder(obj->def); - if (qemuProcessDetectIOThreadPIDs(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessDetectIOThreadPIDs(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0) @@ -8197,32 +8175,32 @@ qemuProcessReconnect(void *opaque) qemuProcessFiltersInstantiate(obj->def); - if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshDisks(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && - qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockNodeNamesDetect(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; /* If querying of guest's RTC failed, report error, but do not kill the domain. 
*/ - qemuRefreshRTC(driver, obj); + qemuRefreshRTC(obj); - if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshBalloonState(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0) goto error; - if (qemuProcessRefreshBlockjobs(driver, obj) < 0) + if (qemuProcessRefreshBlockjobs(obj) < 0) goto error; if (qemuProcessUpdateDevices(driver, obj) < 0) goto error; - if (qemuRefreshPRManagerState(driver, obj) < 0) + if (qemuRefreshPRManagerState(obj) < 0) goto error; qemuProcessReconnectCheckMemAliasOrderMismatch(obj); @@ -8276,7 +8254,7 @@ qemuProcessReconnect(void *opaque) if (jobStarted) { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactive(driver, obj); - qemuDomainObjEndJob(driver, obj); + qemuDomainObjEndJob(obj); } else { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactiveJob(driver, obj); diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index dbd989c321..448b65537a 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -66,12 +66,10 @@ qemuProcessIncomingDefPtr qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps, const char *path); void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc); -int qemuProcessBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessBeginJob(virDomainObjPtr vm, virDomainJobOperation operation, unsigned long apiFlags); -void qemuProcessEndJob(virQEMUDriverPtr driver, - virDomainObjPtr vm); +void qemuProcessEndJob(virDomainObjPtr vm); typedef enum { VIR_QEMU_PROCESS_START_COLD = 1 << 0, @@ -145,8 +143,7 @@ typedef enum { VIR_QEMU_PROCESS_STOP_NO_RELABEL = 1 << 1, } qemuProcessStopFlags; -int qemuProcessBeginStopJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessBeginStopJob(virDomainObjPtr vm, qemuDomainJob job, bool forceKill); void qemuProcessStop(virQEMUDriverPtr driver, @@ -195,12 +192,10 @@ int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, virDomainObjPtr 
vm, qemuDomainAsyncJob asyncJob); -int qemuProcessRefreshBalloonState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessRefreshBalloonState(virDomainObjPtr vm, int asyncJob); -int qemuProcessRefreshDisks(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessRefreshDisks(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); int qemuProcessStartManagedPRDaemon(virDomainObjPtr vm) G_GNUC_NO_INLINE; diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 9f4a146861..292e360193 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -295,14 +295,13 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver, } } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_SNAPSHOT) < 0) { + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0) { resume = false; goto cleanup; } ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) goto cleanup; @@ -809,11 +808,11 @@ qemuSnapshotDiskCleanup(qemuSnapshotDiskDataPtr data, * be set to NULL by qemuSnapshotDiskUpdateSource */ if (data[i].src) { if (data[i].blockdevadded) { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data[i].crdata->srcdata[0]); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } @@ -880,8 +879,7 @@ qemuSnapshotDiskBitmapsPropagate(qemuSnapshotDiskDataPtr dd, static int -qemuSnapshotDiskPrepareOneBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuSnapshotDiskPrepareOneBlockdev(virDomainObjPtr vm, qemuSnapshotDiskDataPtr dd, virQEMUDriverConfigPtr cfg, bool reuse, @@ -907,13 +905,13 @@ qemuSnapshotDiskPrepareOneBlockdev(virQEMUDriverPtr driver, return -1; if (reuse) { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) 
< 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), dd->crdata->srcdata[0]); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; } else { if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData, @@ -1021,7 +1019,7 @@ qemuSnapshotDiskPrepareOne(virQEMUDriverPtr driver, dd->prepared = true; if (blockdev) { - if (qemuSnapshotDiskPrepareOneBlockdev(driver, vm, dd, cfg, reuse, + if (qemuSnapshotDiskPrepareOneBlockdev(vm, dd, cfg, reuse, blockNamedNodeData, asyncJob) < 0) return -1; @@ -1188,12 +1186,12 @@ qemuSnapshotCreateDiskActive(virQEMUDriverPtr driver, goto cleanup; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rc = -1; for (i = 0; i < ndiskdata; i++) { @@ -1250,7 +1248,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) { int freeze; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) { @@ -1395,7 +1393,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, } if (thaw != 0 && - qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) >= 0 && + qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) >= 0 && virDomainObjIsActive(vm)) { if (qemuSnapshotFSThaw(vm, ret == 0 && thaw > 0) < 0) { /* helper reported the error, if it was needed */ @@ -1546,7 +1544,7 @@ qemuSnapshotCreateXML(virDomainPtr domain, * a regular job, so we need to set the job mask to disallow query as * 'savevm' blocks the monitor. External snapshot will then modify the * job mask appropriately. 
*/ - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SNAPSHOT, VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0) goto cleanup; @@ -1681,7 +1679,7 @@ qemuSnapshotCreateXML(virDomainPtr domain, virDomainSnapshotObjListRemove(vm->snapshots, snap); } - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); cleanup: return snapshot; @@ -1748,8 +1746,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, goto cleanup; } - if (qemuProcessBeginJob(driver, vm, - VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT, flags) < 0) goto cleanup; @@ -1921,11 +1918,10 @@ qemuSnapshotRevert(virDomainObjPtr vm, } } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_START) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_START) < 0) goto endjob; rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) { /* XXX resume domain if it was running before the @@ -2042,7 +2038,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, if (qemuSnapshotRevertInactive(driver, vm, snap) < 0) { qemuDomainRemoveInactive(driver, vm); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); goto cleanup; } @@ -2067,7 +2063,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, virDomainAuditStart(vm, "from-snapshot", rc >= 0); if (rc < 0) { qemuDomainRemoveInactive(driver, vm); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); goto cleanup; } detail = VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT; @@ -2104,7 +2100,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, ret = 0; endjob: - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: if (ret == 0) { @@ -2188,7 +2184,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY | VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(driver, vm, 
QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot))) @@ -2261,7 +2257,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: return ret; diff --git a/tests/qemuhotplugtest.c b/tests/qemuhotplugtest.c index 1e18820a2b..b95335eca7 100644 --- a/tests/qemuhotplugtest.c +++ b/tests/qemuhotplugtest.c @@ -457,7 +457,7 @@ testQemuHotplugCpuPrepare(const char *test, priv->mon = qemuMonitorTestGetMonitor(data->mon); virObjectUnlock(priv->mon); - if (qemuDomainRefreshVcpuInfo(&driver, data->vm, 0, false) < 0) + if (qemuDomainRefreshVcpuInfo(data->vm, 0, false) < 0) goto error; return data; -- 2.25.1

References to `qemuDomainObjPrivatePtr` in qemu_domainjob were removed as it is a qemu-hypervisor specific pointer. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_backup.c | 15 +- src/qemu/qemu_checkpoint.c | 12 +- src/qemu/qemu_domain.c | 20 +- src/qemu/qemu_domainjob.c | 313 ++++++++++---------- src/qemu/qemu_domainjob.h | 34 ++- src/qemu/qemu_driver.c | 568 ++++++++++++++++++++++--------------- src/qemu/qemu_migration.c | 23 +- src/qemu/qemu_process.c | 42 +-- src/qemu/qemu_snapshot.c | 25 +- 9 files changed, 589 insertions(+), 463 deletions(-) diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index 7e5926250a..4e606c252f 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -572,7 +572,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, virDomainBackupDefFree(priv->backup); priv->backup = NULL; - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); } @@ -740,13 +740,14 @@ qemuBackupBegin(virDomainObjPtr vm, * infrastructure for async jobs. 
We'll allow standard modify-type jobs * as the interlocking of conflicting operations is handled on the block * job level */ - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_BACKUP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_BACKUP, VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0) return -1; - qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MODIFY))); + qemuDomainObjSetAsyncJobMask(&priv->job, + (QEMU_JOB_DEFAULT_MASK | + JOB_MASK(QEMU_JOB_SUSPEND) | + JOB_MASK(QEMU_JOB_MODIFY))); jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; if (!virDomainObjIsActive(vm)) { @@ -877,9 +878,9 @@ qemuBackupBegin(virDomainObjPtr vm, def = g_steal_pointer(&priv->backup); if (ret == 0) - qemuDomainObjReleaseAsyncJob(vm); + qemuDomainObjReleaseAsyncJob(&priv->job); else - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); return ret; } diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c index b90410aa20..e9547da555 100644 --- a/src/qemu/qemu_checkpoint.c +++ b/src/qemu/qemu_checkpoint.c @@ -539,7 +539,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, /* Unlike snapshots, the RNG schema already ensured a sane filename. */ /* We are going to modify the domain below. 
*/ - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return NULL; if (redefine) { @@ -561,7 +561,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, checkpoint = virGetDomainCheckpoint(domain, chk->def->name); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return checkpoint; } @@ -588,7 +588,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -697,7 +697,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -781,7 +781,7 @@ qemuCheckpointDelete(virDomainObjPtr vm, VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY | VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (!metadata_only) { @@ -849,6 +849,6 @@ qemuCheckpointDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 7f56720011..cc89dec3b4 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -2994,7 +2994,7 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf, if (priv->lockState) virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState); - if (qemuDomainObjPrivateXMLFormatJob(buf, vm) < 0) + if (qemuDomainObjPrivateXMLFormatJob(buf, vm, &priv->job) < 0) return -1; if (priv->fakeReboot) @@ -3653,7 +3653,7 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, priv->lockState = virXPathString("string(./lockstate)", ctxt); - if (qemuDomainObjPrivateXMLParseJob(vm, ctxt) < 0) + if (qemuDomainObjPrivateXMLParseJob(vm, ctxt, &priv->job) < 0) 
goto error; priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1; @@ -6097,12 +6097,12 @@ qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj, if (asyncJob != QEMU_ASYNC_JOB_NONE) { int ret; - if ((ret = qemuDomainObjBeginNestedJob(obj, asyncJob)) < 0) + if ((ret = qemuDomainObjBeginNestedJob(obj, &priv->job, asyncJob)) < 0) return ret; if (!virDomainObjIsActive(obj)) { virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("domain is no longer running")); - qemuDomainObjEndJob(obj); + qemuDomainObjEndJob(obj, &priv->job); return -1; } } else if (priv->job.asyncOwner == virThreadSelfID()) { @@ -6147,7 +6147,7 @@ qemuDomainObjExitMonitorInternal(virDomainObjPtr obj) priv->mon = NULL; if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjEndJob(obj); + qemuDomainObjEndJob(obj, &priv->job); } void qemuDomainObjEnterMonitor(virDomainObjPtr obj) @@ -7291,13 +7291,14 @@ qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, virDomainObjPtr vm) { bool haveJob; + qemuDomainObjPrivatePtr priv = vm->privateData; - haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactive(driver, vm); if (haveJob) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -7312,13 +7313,14 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, virDomainObjPtr vm) { bool haveJob; + qemuDomainObjPrivatePtr priv = vm->privateData; - haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactiveLocked(driver, vm); if (haveJob) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index bb260ccb2e..ecd694958c 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -18,7 +18,6 @@ #include <config.h> -#include "qemu_domain.h" #include "qemu_migration.h" #include 
"qemu_domainjob.h" #include "viralloc.h" @@ -176,26 +175,24 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) } int -qemuDomainObjRestoreJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job) +qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, + qemuDomainJobObjPtr oldJob) { - qemuDomainObjPrivatePtr priv = obj->privateData; + memset(oldJob, 0, sizeof(*oldJob)); + oldJob->active = job->active; + oldJob->owner = job->owner; + oldJob->asyncJob = job->asyncJob; + oldJob->asyncOwner = job->asyncOwner; + oldJob->phase = job->phase; + oldJob->privateData = g_steal_pointer(&job->privateData); + oldJob->apiFlags = job->apiFlags; - memset(job, 0, sizeof(*job)); - job->active = priv->job.active; - job->owner = priv->job.owner; - job->asyncJob = priv->job.asyncJob; - job->asyncOwner = priv->job.asyncOwner; - job->phase = priv->job.phase; - job->privateData = g_steal_pointer(&priv->job.privateData); - job->apiFlags = priv->job.apiFlags; - - if (!(priv->job.privateData = priv->job.cb->jobcb->allocJobPrivate())) + if (!(job->privateData = job->cb->jobcb->allocJobPrivate())) return -1; - job->cb = priv->job.cb; + oldJob->cb = job->cb; - qemuDomainObjResetJob(&priv->job); - qemuDomainObjResetAsyncJob(&priv->job); + qemuDomainObjResetJob(job); + qemuDomainObjResetAsyncJob(job); return 0; } @@ -218,65 +215,61 @@ qemuDomainTrackJob(qemuDomainJob job) void qemuDomainObjSetJobPhase(virDomainObjPtr obj, + qemuDomainJobObjPtr job, int phase) { - qemuDomainObjPrivatePtr priv = obj->privateData; unsigned long long me = virThreadSelfID(); - if (!priv->job.asyncJob) + if (!job->asyncJob) return; VIR_DEBUG("Setting '%s' phase to '%s'", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase)); + qemuDomainAsyncJobTypeToString(job->asyncJob), + qemuDomainAsyncJobPhaseToString(job->asyncJob, phase)); - if (priv->job.asyncOwner && me != priv->job.asyncOwner) { + if (job->asyncOwner && me != job->asyncOwner) { VIR_WARN("'%s' async job is 
owned by thread %llu", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - priv->job.asyncOwner); + qemuDomainAsyncJobTypeToString(job->asyncJob), + job->asyncOwner); } - priv->job.phase = phase; - priv->job.asyncOwner = me; - priv->job.cb->saveStatus(obj); + job->phase = phase; + job->asyncOwner = me; + job->cb->saveStatus(obj); } void -qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, +qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job, unsigned long long allowedJobs) { - qemuDomainObjPrivatePtr priv = obj->privateData; - - if (!priv->job.asyncJob) + if (!job->asyncJob) return; - priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY); + job->mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY); } void -qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj) +qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjResetJob(&priv->job); - qemuDomainObjResetAsyncJob(&priv->job); - priv->job.cb->saveStatus(obj); + if (job->active == QEMU_JOB_ASYNC_NESTED) + qemuDomainObjResetJob(job); + qemuDomainObjResetAsyncJob(job); + job->cb->saveStatus(obj); } void -qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj) +qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - VIR_DEBUG("Releasing ownership of '%s' async job", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + qemuDomainAsyncJobTypeToString(job->asyncJob)); - if (priv->job.asyncOwner != virThreadSelfID()) { + if (job->asyncOwner != virThreadSelfID()) { VIR_WARN("'%s' async job is owned by thread %llu", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - priv->job.asyncOwner); + qemuDomainAsyncJobTypeToString(job->asyncJob), + job->asyncOwner); } - priv->job.asyncOwner = 0; + job->asyncOwner = 0; } static bool @@ -336,7 +329,6 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob, bool nowait) { 
- qemuDomainObjPrivatePtr priv = obj->privateData; unsigned long long now; unsigned long long then; bool nested = job == QEMU_JOB_ASYNC_NESTED; @@ -354,85 +346,85 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, qemuDomainAgentJobTypeToString(agentJob), qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name, - qemuDomainJobTypeToString(priv->job.active), - qemuDomainAgentJobTypeToString(priv->job.agentActive), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + qemuDomainJobTypeToString(jobObj->active), + qemuDomainAgentJobTypeToString(jobObj->agentActive), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); if (virTimeMillisNow(&now) < 0) return -1; - priv->job.cb->jobcb->increaseJobsQueued(obj); + jobObj->cb->jobcb->increaseJobsQueued(obj); then = now + QEMU_JOB_WAIT_TIME; retry: if ((!async && job != QEMU_JOB_DESTROY) && - priv->job.cb->jobcb->getMaxQueuedJobs(obj) && - priv->job.cb->jobcb->getJobsQueued(obj) > priv->job.cb->jobcb->getMaxQueuedJobs(obj)) { + jobObj->cb->jobcb->getMaxQueuedJobs(obj) && + jobObj->cb->jobcb->getJobsQueued(obj) > jobObj->cb->jobcb->getMaxQueuedJobs(obj)) { goto error; } - while (!nested && !qemuDomainNestedJobAllowed(&priv->job, job)) { + while (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) { if (nowait) goto cleanup; VIR_DEBUG("Waiting for async job (vm=%p name=%s)", obj, obj->def->name); - if (virCondWaitUntil(&priv->job.asyncCond, &obj->parent.lock, then) < 0) + if (virCondWaitUntil(&jobObj->asyncCond, &obj->parent.lock, then) < 0) goto error; } - while (!qemuDomainObjCanSetJob(&priv->job, job, agentJob)) { + while (!qemuDomainObjCanSetJob(jobObj, job, agentJob)) { if (nowait) goto cleanup; VIR_DEBUG("Waiting for job (vm=%p name=%s)", obj, obj->def->name); - if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0) + if (virCondWaitUntil(&jobObj->cond, &obj->parent.lock, then) < 0) goto error; } /* No job is active but a new async job could have been started while obj * was unlocked, so we need 
to recheck it. */ - if (!nested && !qemuDomainNestedJobAllowed(&priv->job, job)) + if (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) goto retry; ignore_value(virTimeMillisNow(&now)); if (job) { - qemuDomainObjResetJob(&priv->job); + qemuDomainObjResetJob(jobObj); if (job != QEMU_JOB_ASYNC) { VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - priv->job.active = job; - priv->job.owner = virThreadSelfID(); - priv->job.ownerAPI = virThreadJobGet(); - priv->job.started = now; + jobObj->active = job; + jobObj->owner = virThreadSelfID(); + jobObj->ownerAPI = virThreadJobGet(); + jobObj->started = now; } else { VIR_DEBUG("Started async job: %s (vm=%p name=%s)", qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); - qemuDomainObjResetAsyncJob(&priv->job); - priv->job.cb->jobcb->currentJobInfoInit(&priv->job, now); - priv->job.asyncJob = asyncJob; - priv->job.asyncOwner = virThreadSelfID(); - priv->job.asyncOwnerAPI = virThreadJobGet(); - priv->job.asyncStarted = now; + qemuDomainObjResetAsyncJob(jobObj); + jobObj->cb->jobcb->currentJobInfoInit(jobObj, now); + jobObj->asyncJob = asyncJob; + jobObj->asyncOwner = virThreadSelfID(); + jobObj->asyncOwnerAPI = virThreadJobGet(); + jobObj->asyncStarted = now; } } if (agentJob) { - qemuDomainObjResetAgentJob(&priv->job); + qemuDomainObjResetAgentJob(jobObj); VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)", qemuDomainAgentJobTypeToString(agentJob), obj, obj->def->name, - qemuDomainJobTypeToString(priv->job.active), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); - priv->job.agentActive = agentJob; - priv->job.agentOwner = virThreadSelfID(); - priv->job.agentOwnerAPI = virThreadJobGet(); - priv->job.agentStarted = now; + qemuDomainJobTypeToString(jobObj->active), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); + 
jobObj->agentActive = agentJob; + jobObj->agentOwner = virThreadSelfID(); + jobObj->agentOwnerAPI = virThreadJobGet(); + jobObj->agentStarted = now; } if (qemuDomainTrackJob(job)) @@ -442,12 +434,12 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, error: ignore_value(virTimeMillisNow(&now)); - if (priv->job.active && priv->job.started) - duration = now - priv->job.started; - if (priv->job.agentActive && priv->job.agentStarted) - agentDuration = now - priv->job.agentStarted; - if (priv->job.asyncJob && priv->job.asyncStarted) - asyncDuration = now - priv->job.asyncStarted; + if (jobObj->active && jobObj->started) + duration = now - jobObj->started; + if (jobObj->agentActive && jobObj->agentStarted) + agentDuration = now - jobObj->agentStarted; + if (jobObj->asyncJob && jobObj->asyncStarted) + asyncDuration = now - jobObj->asyncStarted; VIR_WARN("Cannot start job (%s, %s, %s) for domain %s; " "current job is (%s, %s, %s) " @@ -457,24 +449,24 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, qemuDomainAgentJobTypeToString(agentJob), qemuDomainAsyncJobTypeToString(asyncJob), obj->def->name, - qemuDomainJobTypeToString(priv->job.active), - qemuDomainAgentJobTypeToString(priv->job.agentActive), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - priv->job.owner, NULLSTR(priv->job.ownerAPI), - priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI), - priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI), - priv->job.apiFlags, + qemuDomainJobTypeToString(jobObj->active), + qemuDomainAgentJobTypeToString(jobObj->agentActive), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + jobObj->owner, NULLSTR(jobObj->ownerAPI), + jobObj->agentOwner, NULLSTR(jobObj->agentOwnerAPI), + jobObj->asyncOwner, NULLSTR(jobObj->asyncOwnerAPI), + jobObj->apiFlags, duration / 1000, agentDuration / 1000, asyncDuration / 1000); if (job) { - if (nested || qemuDomainNestedJobAllowed(&priv->job, job)) - blocker = priv->job.ownerAPI; + if (nested || qemuDomainNestedJobAllowed(jobObj, 
job)) + blocker = jobObj->ownerAPI; else - blocker = priv->job.asyncOwnerAPI; + blocker = jobObj->asyncOwnerAPI; } if (agentJob) - agentBlocker = priv->job.agentOwnerAPI; + agentBlocker = jobObj->agentOwnerAPI; if (errno == ETIMEDOUT) { if (blocker && agentBlocker) { @@ -497,8 +489,8 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, _("cannot acquire state change lock")); } ret = -2; - } else if (priv->job.cb->jobcb->getMaxQueuedJobs(obj) && - priv->job.cb->jobcb->getJobsQueued(obj) > priv->job.cb->jobcb->getMaxQueuedJobs(obj)) { + } else if (jobObj->cb->jobcb->getMaxQueuedJobs(obj) && + jobObj->cb->jobcb->getJobsQueued(obj) > jobObj->cb->jobcb->getMaxQueuedJobs(obj)) { if (blocker && agentBlocker) { virReportError(VIR_ERR_OPERATION_FAILED, _("cannot acquire state change " @@ -528,7 +520,7 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, } cleanup: - priv->job.cb->jobcb->decreaseJobsQueued(obj); + jobObj->cb->jobcb->decreaseJobsQueued(obj); return ret; } @@ -541,11 +533,9 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, * Successful calls must be followed by EndJob eventually */ int qemuDomainObjBeginJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - if (qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, false) < 0) @@ -563,52 +553,48 @@ int qemuDomainObjBeginJob(virDomainObjPtr obj, */ int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAgentJob agentJob) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_NONE, agentJob, QEMU_ASYNC_JOB_NONE, false); } int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) { - qemuDomainObjPrivatePtr priv = 
obj->privateData; - - if (qemuDomainObjBeginJobInternal(obj, &priv->job, QEMU_JOB_ASYNC, + if (qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC, QEMU_AGENT_JOB_NONE, asyncJob, false) < 0) return -1; - priv->job.cb->jobcb->setJobInfoOperation(&priv->job, operation); - priv->job.apiFlags = apiFlags; + jobObj->cb->jobcb->setJobInfoOperation(jobObj, operation); + jobObj->apiFlags = apiFlags; return 0; } int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob) { - qemuDomainObjPrivatePtr priv = obj->privateData; - - if (asyncJob != priv->job.asyncJob) { + if (asyncJob != jobObj->asyncJob) { virReportError(VIR_ERR_INTERNAL_ERROR, _("unexpected async job %d type expected %d"), - asyncJob, priv->job.asyncJob); + asyncJob, jobObj->asyncJob); return -1; } - if (priv->job.asyncOwner != virThreadSelfID()) { + if (jobObj->asyncOwner != virThreadSelfID()) { VIR_WARN("This thread doesn't seem to be the async job owner: %llu", - priv->job.asyncOwner); + jobObj->asyncOwner); } - return qemuDomainObjBeginJobInternal(obj, &priv->job, + return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC_NESTED, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, @@ -619,6 +605,7 @@ qemuDomainObjBeginNestedJob(virDomainObjPtr obj, * qemuDomainObjBeginJobNowait: * * @obj: domain object + * @jobObj: qemuDomainJobObjPtr * @job: qemuDomainJob to start * * Acquires job for a domain object which must be locked before @@ -629,11 +616,9 @@ qemuDomainObjBeginNestedJob(virDomainObjPtr obj, */ int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - return qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, true); @@ -646,104 +631,101 @@ qemuDomainObjBeginJobNowait(virDomainObjPtr obj, * earlier qemuDomainBeginJob() call */ void -qemuDomainObjEndJob(virDomainObjPtr 
obj) +qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJob job = priv->job.active; + qemuDomainJob job = jobObj->active; - priv->job.cb->jobcb->decreaseJobsQueued(obj); + jobObj->cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetJob(&priv->job); + qemuDomainObjResetJob(jobObj); if (qemuDomainTrackJob(job)) - priv->job.cb->saveStatus(obj); + jobObj->cb->saveStatus(obj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. */ - virCondBroadcast(&priv->job.cond); + virCondBroadcast(&jobObj->cond); } void -qemuDomainObjEndAgentJob(virDomainObjPtr obj) +qemuDomainObjEndAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainAgentJob agentJob = priv->job.agentActive; + qemuDomainAgentJob agentJob = jobObj->agentActive; - priv->job.cb->jobcb->decreaseJobsQueued(obj); + jobObj->cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)", qemuDomainAgentJobTypeToString(agentJob), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetAgentJob(&priv->job); + qemuDomainObjResetAgentJob(jobObj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. 
*/ - virCondBroadcast(&priv->job.cond); + virCondBroadcast(&jobObj->cond); } void -qemuDomainObjEndAsyncJob(virDomainObjPtr obj) +qemuDomainObjEndAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = obj->privateData; - - priv->job.cb->jobcb->decreaseJobsQueued(obj); + jobObj->cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetAsyncJob(&priv->job); - priv->job.cb->saveStatus(obj); - virCondBroadcast(&priv->job.asyncCond); + qemuDomainObjResetAsyncJob(jobObj); + jobObj->cb->saveStatus(obj); + virCondBroadcast(&jobObj->asyncCond); } void -qemuDomainObjAbortAsyncJob(virDomainObjPtr obj) +qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(job->asyncJob), obj, obj->def->name); - priv->job.abortJob = true; + job->abortJob = true; virDomainObjBroadcast(obj); } int qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm) + virDomainObjPtr vm, + qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = vm->privateData; g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); - qemuDomainJob job = priv->job.active; + qemuDomainJob job = jobObj->active; if (!qemuDomainTrackJob(job)) job = QEMU_JOB_NONE; if (job == QEMU_JOB_NONE && - priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) + jobObj->asyncJob == QEMU_ASYNC_JOB_NONE) return 0; virBufferAsprintf(&attrBuf, " type='%s' async='%s'", qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); - if (priv->job.phase) { + if 
(jobObj->phase) { virBufferAsprintf(&attrBuf, " phase='%s'", - qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, - priv->job.phase)); + qemuDomainAsyncJobPhaseToString(jobObj->asyncJob, + jobObj->phase)); } - if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE) - virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags); + if (jobObj->asyncJob != QEMU_ASYNC_JOB_NONE) + virBufferAsprintf(&attrBuf, " flags='0x%lx'", jobObj->apiFlags); - if (priv->job.cb->jobcb->formatJob(&childBuf, &priv->job, vm) < 0) + if (jobObj->cb->jobcb->formatJob(&childBuf, jobObj, vm) < 0) return -1; virXMLFormatElement(buf, "job", &attrBuf, &childBuf); @@ -754,10 +736,9 @@ qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, int qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - xmlXPathContextPtr ctxt) + xmlXPathContextPtr ctxt, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobObjPtr job = &priv->job; VIR_XPATH_NODE_AUTORESTORE(ctxt) g_autofree char *tmp = NULL; @@ -773,7 +754,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, return -1; } VIR_FREE(tmp); - priv->job.active = type; + job->active = type; } if ((tmp = virXPathString("string(@async)", ctxt))) { @@ -785,11 +766,11 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, return -1; } VIR_FREE(tmp); - priv->job.asyncJob = async; + job->asyncJob = async; if ((tmp = virXPathString("string(@phase)", ctxt))) { - priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp); - if (priv->job.phase < 0) { + job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp); + if (job->phase < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Unknown job phase %s"), tmp); return -1; @@ -798,12 +779,12 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, } } - if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) { + if (virXPathULongHex("string(@flags)", ctxt, &job->apiFlags) == -2) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid job flags")); return -1; } - if 
(priv->job.cb->jobcb->parseJob(ctxt, job, vm) < 0) + if (job->cb->jobcb->parseJob(ctxt, job, vm) < 0) return -1; return 0; diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 32ff01009d..f7e5cfa1fd 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -182,35 +182,45 @@ int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, const char *phase); int qemuDomainObjBeginJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAgentJob agentJob) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; -void qemuDomainObjEndJob(virDomainObjPtr obj); -void qemuDomainObjEndAgentJob(virDomainObjPtr obj); -void qemuDomainObjEndAsyncJob(virDomainObjPtr obj); -void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj); +void qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj); +void qemuDomainObjEndAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj); +void qemuDomainObjEndAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj); +void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job); void qemuDomainObjSetJobPhase(virDomainObjPtr obj, + qemuDomainJobObjPtr job, int phase); -void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, +void qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job, unsigned long long allowedJobs); -int qemuDomainObjRestoreJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job); -void 
qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj); -void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj); +int qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, + qemuDomainJobObjPtr oldJob); +void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job); +void qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job); bool qemuDomainTrackJob(qemuDomainJob job); @@ -224,8 +234,10 @@ bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob); int qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm); + virDomainObjPtr vm, + qemuDomainJobObjPtr jobObj); int qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - xmlXPathContextPtr ctxt); + xmlXPathContextPtr ctxt, + qemuDomainJobObjPtr job); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 974f71b22f..c0b986cddf 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -1811,7 +1811,7 @@ static int qemuDomainSuspend(virDomainPtr dom) cfg = virQEMUDriverGetConfig(driver); priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_SUSPEND) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_SUSPEND) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1838,7 +1838,7 @@ static int qemuDomainSuspend(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -1850,6 +1850,7 @@ static int qemuDomainSuspend(virDomainPtr dom) static int qemuDomainResume(virDomainPtr dom) { virQEMUDriverPtr driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; int ret = -1; int state; @@ -1860,11 +1861,12 @@ static int qemuDomainResume(virDomainPtr dom) return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if 
(virDomainObjCheckActive(vm) < 0) @@ -1896,7 +1898,7 @@ static int qemuDomainResume(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -1912,10 +1914,11 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, { int ret = -1; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int agentFlag = isReboot ? QEMU_AGENT_SHUTDOWN_REBOOT : QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1933,7 +1936,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -1948,7 +1951,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1964,7 +1967,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -2043,11 +2046,13 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, qemuAgentPtr agent; int ret = -1; int agentFlag = QEMU_AGENT_SHUTDOWN_REBOOT; + qemuDomainObjPrivatePtr priv = vm->privateData; if (!isReboot) agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (!qemuDomainAgentAvailable(vm, agentForced)) @@ -2062,7 +2067,7 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, 
&priv->job); return ret; } @@ -2075,7 +2080,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -2088,7 +2093,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -2161,16 +2166,17 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainResetEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemReset(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) @@ -2183,7 +2189,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_CRASHED); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2254,7 +2260,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, endjob: if (ret == 0) qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2324,11 +2330,12 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2390,7 
+2397,6 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, } if (def) { - priv = vm->privateData; qemuDomainObjEnterMonitor(vm); r = qemuMonitorSetBalloon(priv->mon, newmem); if (qemuDomainObjExitMonitor(vm) < 0 || r < 0) @@ -2415,7 +2421,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2450,11 +2456,12 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2500,7 +2507,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2523,7 +2530,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2535,7 +2542,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2582,7 +2589,7 @@ static int qemuDomainSendKey(virDomainPtr domain, if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2594,7 +2601,7 @@ static int 
qemuDomainSendKey(virDomainPtr domain, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2764,7 +2771,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SAVE, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SAVE, VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0) goto cleanup; @@ -2858,7 +2865,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virErrorRestore(&save_err); } } - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); if (ret == 0) qemuDomainRemoveInactiveJob(driver, vm); @@ -3271,10 +3278,12 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainCoreDumpWithFormatEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) goto cleanup; @@ -3341,7 +3350,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } } - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); if (ret == 0 && flags & VIR_DUMP_CRASH) qemuDomainRemoveInactiveJob(driver, vm); @@ -3391,7 +3400,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -3465,7 +3474,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (unlink_tmp) unlink(tmp); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -3506,13 +3515,14 @@ processWatchdogEvent(virQEMUDriverPtr driver, g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); 
g_autofree char *dumpfile = getAutoDumpPath(driver, vm); unsigned int flags = VIR_DUMP_MEMORY_ONLY; + qemuDomainObjPrivatePtr priv = vm->privateData; if (!dumpfile) return; switch (action) { case VIR_DOMAIN_WATCHDOG_ACTION_DUMP: - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) { return; @@ -3540,7 +3550,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); } static int @@ -3589,7 +3599,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, bool removeInactive = false; unsigned long flags = VIR_DUMP_MEMORY_ONLY; - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) return; @@ -3655,7 +3665,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); if (removeInactive) qemuDomainRemoveInactiveJob(driver, vm); } @@ -3667,12 +3677,13 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, const char *devAlias) { g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + qemuDomainObjPrivatePtr priv = vm->privateData; virDomainDeviceDef dev; VIR_DEBUG("Removing device %s from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -3695,7 +3706,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, devAlias); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -3910,7 +3921,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm, "from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) 
< 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -3992,7 +4003,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virNetDevRxFilterFree(hostFilter); @@ -4038,7 +4049,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, memset(&dev, 0, sizeof(dev)); } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4079,7 +4090,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4091,8 +4102,9 @@ processBlockJobEvent(virDomainObjPtr vm, { virDomainDiskDefPtr disk; g_autoptr(qemuBlockJobData) job = NULL; + qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4117,7 +4129,7 @@ processBlockJobEvent(virDomainObjPtr vm, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4125,7 +4137,9 @@ static void processJobStatusChangeEvent(virDomainObjPtr vm, qemuBlockJobDataPtr job) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + qemuDomainObjPrivatePtr priv = vm->privateData; + + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4136,7 +4150,7 @@ processJobStatusChangeEvent(virDomainObjPtr vm, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4182,7 +4196,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, endjob: qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4421,6 +4435,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, bool hotpluggable = !!(flags & 
VIR_DOMAIN_VCPU_HOTPLUGGABLE); bool useAgent = !!(flags & VIR_DOMAIN_VCPU_GUEST); int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | @@ -4431,15 +4446,18 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainSetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; if (useAgent) { - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; } else { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; } @@ -4456,9 +4474,9 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, endjob: if (useAgent) - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); else - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -4569,6 +4587,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, virBitmapPtr pcpumap = NULL; virDomainVcpuDefPtr vcpuinfo = NULL; g_autoptr(virQEMUDriverConfig) cfg = NULL; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); @@ -4578,10 +4597,12 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -4620,7 +4641,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -4709,13 +4730,14 @@ qemuDomainPinEmulator(virDomainPtr dom, if 
(virDomainPinEmulatorEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + priv = vm->privateData; + + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) goto endjob; - priv = vm->privateData; if (!(pcpumap = virBitmapNewData(cpumap, maplen))) goto endjob; @@ -4776,7 +4798,7 @@ qemuDomainPinEmulator(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: if (cgroup_emulator) @@ -4878,6 +4900,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) qemuAgentPtr agent; int ncpuinfo = -1; size_t i; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | @@ -4887,6 +4910,8 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainGetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; @@ -4894,7 +4919,8 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) goto cleanup; if (flags & VIR_DOMAIN_VCPU_GUEST) { - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4912,7 +4938,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); if (ncpuinfo < 0) goto cleanup; @@ -4970,14 +4996,14 @@ static int qemuDomainGetIOThreadsLive(virDomainObjPtr vm, virDomainIOThreadInfoPtr **info) { - qemuDomainObjPrivatePtr priv; + qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorIOThreadInfoPtr *iothreads = NULL; virDomainIOThreadInfoPtr *info_ret = NULL; int niothreads = 0; size_t i; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 
0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4986,7 +5012,6 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm, goto endjob; } - priv = vm->privateData; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("IOThreads not supported with this binary")); @@ -5027,7 +5052,7 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm, ret = niothreads; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: if (info_ret) { @@ -5165,7 +5190,7 @@ qemuDomainPinIOThread(virDomainPtr dom, if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5254,7 +5279,7 @@ qemuDomainPinIOThread(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: if (cgroup_iothread) @@ -5615,7 +5640,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5702,7 +5727,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -6699,6 +6724,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, int ret = -1; int nsnapshots; int ncheckpoints; + qemuDomainObjPrivatePtr priv; g_autoptr(virQEMUDriverConfig) cfg = NULL; g_autofree char *nvram_path = NULL; @@ -6718,12 +6744,14 @@ qemuDomainUndefineFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + cfg = virQEMUDriverGetConfig(driver); if (virDomainUndefineFlagsEnsureACL(dom->conn, 
vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!vm->persistent) { @@ -6819,7 +6847,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -7786,6 +7814,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; int ret = -1; virNWFilterReadLockFilterUpdates(); @@ -7793,10 +7822,12 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -7808,7 +7839,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -7853,7 +7884,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -7922,7 +7953,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainDefFree(vmdef); @@ -8100,15 +8131,18 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; int ret = -1; if (!(vm = qemuDomainObjFromDomain(dom))) goto 
cleanup; + priv = vm->privateData; + if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8120,7 +8154,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8135,15 +8169,18 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; int ret = -1; if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8155,7 +8192,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8198,11 +8235,13 @@ static int qemuDomainSetAutostart(virDomainPtr dom, g_autofree char *autostartLink = NULL; int ret = -1; g_autoptr(virQEMUDriverConfig) cfg = NULL; + qemuDomainObjPrivatePtr priv; if (!(vm = qemuDomainObjFromDomain(dom))) return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetAutostartEnsureACL(dom->conn, vm->def) < 0) goto cleanup; @@ -8216,7 +8255,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, autostart = (autostart != 0); if (vm->autostart != autostart) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name))) @@ -8254,7 +8293,7 
@@ static int qemuDomainSetAutostart(virDomainPtr dom, vm->autostart = autostart; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } ret = 0; @@ -8362,7 +8401,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -8396,7 +8435,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8538,7 +8577,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; /* QEMU and LXC implementation are identical */ @@ -8569,7 +8608,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8792,7 +8831,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, } } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -8847,7 +8886,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virBitmapFree(nodeset); @@ -9001,7 +9040,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9043,7 +9082,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + 
qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9072,17 +9111,17 @@ qemuDomainGetPerfEvents(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(def = virDomainObjGetOneDef(vm, flags))) goto endjob; - priv = vm->privateData; - for (i = 0; i < VIR_PERF_EVENT_LAST; i++) { bool perf_enabled; @@ -9104,7 +9143,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9278,7 +9317,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9512,7 +9551,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainDefFree(persistentDefCopy); @@ -9806,7 +9845,7 @@ qemuDomainBlockResize(virDomainPtr dom, if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -9851,7 +9890,7 @@ qemuDomainBlockResize(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9996,14 +10035,17 @@ qemuDomainBlockStats(virDomainPtr dom, qemuBlockStatsPtr blockstats = NULL; int ret = -1; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if 
(virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10026,7 +10068,7 @@ qemuDomainBlockStats(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10046,6 +10088,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, qemuBlockStatsPtr blockstats = NULL; int nstats; int ret = -1; + qemuDomainObjPrivatePtr priv; VIR_DEBUG("params=%p, flags=0x%x", params, flags); @@ -10057,10 +10100,12 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10113,7 +10158,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, *nparams = nstats; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(blockstats); @@ -10176,6 +10221,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, bool inboundSpecified = false, outboundSpecified = false; int actualType; bool qosSupported = true; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); @@ -10201,11 +10247,12 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10379,7 +10426,7 @@ 
qemuDomainSetInterfaceParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virNetDevBandwidthFree(bandwidth); @@ -10542,6 +10589,7 @@ qemuDomainMemoryStats(virDomainPtr dom, unsigned int flags) { virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, -1); @@ -10549,15 +10597,17 @@ qemuDomainMemoryStats(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; ret = qemuDomainMemoryStatsInternal(vm, stats, nr_stats); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10647,6 +10697,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainMemoryPeekEnsureACL(dom->conn, vm->def) < 0) goto cleanup; @@ -10657,7 +10708,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10675,7 +10726,6 @@ qemuDomainMemoryPeek(virDomainPtr dom, qemuSecurityDomainSetPathLabel(driver, vm, tmp, false); - priv = vm->privateData; qemuDomainObjEnterMonitor(vm); if (flags == VIR_MEMORY_VIRTUAL) { if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) { @@ -10702,7 +10752,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FORCE_CLOSE(fd); @@ -10922,6 +10972,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; int ret = -1; virDomainDiskDefPtr disk; 
g_autoptr(virQEMUDriverConfig) cfg = NULL; @@ -10933,11 +10984,12 @@ qemuDomainGetBlockInfo(virDomainPtr dom, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(disk = virDomainDiskByName(vm->def, path, false))) { @@ -11009,7 +11061,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(entry); virDomainObjEndAPI(&vm); @@ -12595,7 +12647,7 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -12631,7 +12683,7 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm, ret = 0; cleanup: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -12725,7 +12777,7 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) VIR_DEBUG("Cancelling migration job at client request"); - qemuDomainObjAbortAsyncJob(vm); + qemuDomainObjAbortAsyncJob(vm, &priv->job); qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateCancel(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) @@ -12746,16 +12798,17 @@ static int qemuDomainAbortJob(virDomainPtr dom) if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_ABORT) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_ABORT) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; jobPriv = priv->job.privateData; switch (priv->job.asyncJob) { @@ -12818,7 +12871,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) } endjob: - 
qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -12842,17 +12895,17 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - VIR_DEBUG("Setting migration downtime to %llums", downtime); if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_DOWNTIME)) { @@ -12877,7 +12930,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -12894,16 +12947,19 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, qemuMigrationParamsPtr migParams = NULL; int ret = -1; int rc; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -12929,7 +12985,7 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: qemuMigrationParamsFree(migParams); @@ -12954,17 +13010,17 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto 
cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (!qemuMigrationCapsGet(vm, QEMU_MIGRATION_CAP_XBZRLE)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("Compressed migration is not supported by " @@ -12991,7 +13047,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13014,17 +13070,17 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (!qemuMigrationCapsGet(vm, QEMU_MIGRATION_CAP_XBZRLE)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("Compressed migration is not supported by " @@ -13055,7 +13111,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13102,7 +13158,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13145,7 +13201,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13160,9 +13216,10 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, g_autoptr(qemuMigrationParams) migParams = NULL; unsigned long long bw; int rc; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(vm, 
QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -13199,7 +13256,7 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, ret = 0; cleanup: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -13252,17 +13309,17 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("post-copy can only be started while " @@ -13284,7 +13341,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13978,17 +14035,17 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, if (!(vm = qemuDomainObjFromDomain(domain))) goto cleanup; + priv = vm->privateData; + if (virDomainQemuMonitorCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_CUSTOM_MONITOR, NULL); hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP); @@ -13999,7 +14056,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14317,7 +14374,7 @@ 
qemuDomainBlockPullCommon(virDomainObjPtr vm, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14413,7 +14470,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, qemuBlockJobStarted(job, vm); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: qemuBlockJobStartupFinalize(vm, job); @@ -14447,7 +14504,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14530,7 +14587,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, endjob: if (job && !async) qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14597,17 +14654,19 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, int ret = -1; qemuMonitorBlockJobInfo rawInfo; g_autoptr(qemuBlockJobData) job = NULL; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_BLOCK_JOB_INFO_BANDWIDTH_BYTES, -1); if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainGetBlockJobInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14635,7 +14694,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14654,6 +14713,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, virDomainObjPtr vm; unsigned long long speed = bandwidth; g_autoptr(qemuBlockJobData) job = NULL; + qemuDomainObjPrivatePtr priv; 
virCheckFlags(VIR_DOMAIN_BLOCK_JOB_SPEED_BANDWIDTH_BYTES, -1); @@ -14671,10 +14731,12 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14697,7 +14759,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14876,7 +14938,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -15152,7 +15214,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (need_unlink && virStorageFileUnlink(mirror) < 0) VIR_WARN("%s", _("unable to remove just-created copy target")); virStorageFileDeinit(mirror); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); qemuBlockJobStartupFinalize(vm, job); return ret; @@ -15376,7 +15438,7 @@ qemuDomainBlockCommit(virDomainPtr dom, if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -15592,7 +15654,7 @@ qemuDomainBlockCommit(virDomainPtr dom, virErrorRestore(&orig_err); } qemuBlockJobStartupFinalize(vm, job); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -15616,17 +15678,17 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - 
if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (idx >= vm->def->ngraphics) { virReportError(VIR_ERR_INTERNAL_ERROR, _("No graphics backend with index %d"), idx); @@ -15664,7 +15726,7 @@ qemuDomainOpenGraphics(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -15732,14 +15794,14 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom, if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH)); if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); if (ret < 0) goto cleanup; @@ -15977,12 +16039,11 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; - priv = vm->privateData; - if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) goto endjob; @@ -16244,7 +16305,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(info.group_name); @@ -16288,7 +16349,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; /* the API check guarantees that only one of the definitions 
will be set */ @@ -16401,7 +16462,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(reply.group_name); @@ -16434,7 +16495,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16475,7 +16536,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, ret = n; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16497,6 +16558,7 @@ qemuDomainSetMetadata(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; g_autoptr(virQEMUDriverConfig) cfg = NULL; int ret = -1; @@ -16507,11 +16569,12 @@ qemuDomainSetMetadata(virDomainPtr dom, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; ret = virDomainObjSetMetadata(vm, type, metadata, key, uri, @@ -16524,7 +16587,7 @@ qemuDomainSetMetadata(virDomainPtr dom, virObjectEventStateQueue(driver->domainEventState, ev); } - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16633,7 +16696,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE)) return -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -16642,7 +16705,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, ret = 
qemuDomainProbeQMPCurrentMachine(vm, wakeupSupported); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -16652,9 +16715,11 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm, unsigned int target) { qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -16668,7 +16733,7 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -16758,24 +16823,24 @@ qemuDomainPMWakeup(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemWakeup(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16809,16 +16874,20 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, int ret = -1; char *result = NULL; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, NULL); if (!(vm = qemuDomainObjFromDomain(domain))) goto cleanup; + priv = vm->privateData; + if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16836,7 +16905,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, 
VIR_FREE(result); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16895,6 +16964,7 @@ qemuDomainFSTrim(virDomainPtr dom, { virDomainObjPtr vm; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, -1); @@ -16909,10 +16979,13 @@ qemuDomainFSTrim(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -16926,7 +16999,7 @@ qemuDomainFSTrim(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17080,9 +17153,11 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm, char **hostname) { qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17097,7 +17172,7 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -17112,8 +17187,9 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm, int n_leases; size_t i, j; int ret = -1; + qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17155,7 +17231,7 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -17211,6 +17287,7 @@ 
qemuDomainGetTime(virDomainPtr dom, unsigned int flags) { virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; qemuAgentPtr agent; int ret = -1; int rv; @@ -17220,10 +17297,13 @@ qemuDomainGetTime(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return ret; + priv = vm->privateData; + if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17242,7 +17322,7 @@ qemuDomainGetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17257,9 +17337,11 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm, bool rtcSync) { qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17273,7 +17355,7 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -17315,7 +17397,7 @@ qemuDomainSetTime(virDomainPtr dom, if (qemuDomainSetTimeAgent(vm, seconds, nseconds, rtcSync) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17335,7 +17417,7 @@ qemuDomainSetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17351,16 +17433,20 @@ qemuDomainFSFreeze(virDomainPtr dom, { virDomainObjPtr vm; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); if (!(vm = qemuDomainObjFromDomain(dom))) goto 
cleanup; + priv = vm->privateData; + if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17369,7 +17455,7 @@ qemuDomainFSFreeze(virDomainPtr dom, ret = qemuSnapshotFSFreeze(vm, mountpoints, nmountpoints); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17385,6 +17471,7 @@ qemuDomainFSThaw(virDomainPtr dom, { virDomainObjPtr vm; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); @@ -17397,10 +17484,13 @@ qemuDomainFSThaw(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17409,7 +17499,7 @@ qemuDomainFSThaw(virDomainPtr dom, ret = qemuSnapshotFSThaw(vm, true); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -18597,6 +18687,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, virErrorPtr orig_err = NULL; virDomainObjPtr *vms = NULL; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; size_t nvms; virDomainStatsRecordPtr *tmpstats = NULL; bool enforce = !!(flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS); @@ -18644,6 +18735,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, virDomainStatsRecordPtr tmp = NULL; domflags = 0; vm = vms[i]; + priv = vm->privateData; virObjectLock(vm); @@ -18651,9 +18743,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, int rv; if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT) - rv = qemuDomainObjBeginJobNowait(vm, QEMU_JOB_QUERY); + 
rv = qemuDomainObjBeginJobNowait(vm, &priv->job, QEMU_JOB_QUERY); else - rv = qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY); if (rv == 0) domflags |= QEMU_DOMAIN_STATS_HAVE_JOB; @@ -18664,7 +18756,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, domflags |= QEMU_DOMAIN_STATS_BACKING; if (qemuDomainGetStats(conn, vm, stats, &tmp, domflags) < 0) { if (HAVE_JOB(domflags) && vm) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); virObjectUnlock(vm); goto cleanup; @@ -18674,7 +18766,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, tmpstats[nstats++] = tmp; if (HAVE_JOB(domflags)) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); virObjectUnlock(vm); } @@ -18720,8 +18812,10 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm, { int ret = -1; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) return ret; if (virDomainObjCheckActive(vm) < 0) @@ -18735,7 +18829,7 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -18819,19 +18913,22 @@ qemuDomainGetFSInfo(virDomainPtr dom, qemuAgentFSInfoPtr *agentinfo = NULL; int ret = -1; int nfs; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, ret); if (!(vm = qemuDomainObjFromDomain(dom))) return ret; + priv = vm->privateData; + if (virDomainGetFSInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; if ((nfs = qemuDomainGetFSInfoAgent(vm, &agentinfo)) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -18840,7 +18937,7 @@ qemuDomainGetFSInfo(virDomainPtr dom, ret = virDomainFSInfoFormat(agentinfo, nfs, vm->def, info); 
endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: g_free(agentinfo); @@ -18857,6 +18954,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, { virDomainObjPtr vm = NULL; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, -1); @@ -18864,6 +18962,8 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainInterfaceAddressesEnsureACL(dom->conn, vm->def) < 0) goto cleanup; @@ -18876,7 +18976,8 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, break; case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT: - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -18887,7 +18988,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); break; @@ -18916,6 +19017,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, { virDomainObjPtr vm; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ret = -1; int rv; @@ -18924,10 +19026,13 @@ qemuDomainSetUserPassword(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return ret; + priv = vm->privateData; + if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -18947,7 +19052,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19070,6 +19175,7 @@ static int qemuDomainRename(virDomainPtr dom, virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; int ret = -1; + 
qemuDomainObjPrivatePtr priv; virCheckFlags(0, ret); @@ -19079,7 +19185,9 @@ static int qemuDomainRename(virDomainPtr dom, if (virDomainRenameEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + priv = vm->privateData; + + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjIsActive(vm)) { @@ -19126,7 +19234,7 @@ static int qemuDomainRename(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19206,6 +19314,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, qemuAgentPtr agent; qemuAgentCPUInfoPtr info = NULL; int ninfo = 0; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, ret); @@ -19213,10 +19322,13 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19235,7 +19347,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: VIR_FREE(info); @@ -19254,6 +19366,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, virBitmapPtr map = NULL; qemuAgentCPUInfoPtr info = NULL; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ninfo = 0; size_t i; int ret = -1; @@ -19271,10 +19384,12 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if 
(!qemuDomainAgentAvailable(vm, true)) @@ -19320,7 +19435,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: VIR_FREE(info); @@ -19343,6 +19458,7 @@ qemuDomainSetVcpu(virDomainPtr dom, virBitmapPtr map = NULL; ssize_t lastvcpu; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); @@ -19364,10 +19480,12 @@ qemuDomainSetVcpu(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -19394,7 +19512,7 @@ qemuDomainSetVcpu(virDomainPtr dom, ret = qemuDomainSetVcpuInternal(driver, vm, def, persistentDef, map, !!state); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virBitmapFree(map); @@ -19426,7 +19544,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19463,7 +19581,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19521,7 +19639,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -19552,7 
+19670,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19641,10 +19759,11 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm, int ret = -1; g_autofree char *tmp = NULL; int maxpar = 0; + qemuDomainObjPrivatePtr priv = vm->privateData; virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; qemuDomainObjEnterMonitor(vm); @@ -19664,7 +19783,7 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -19832,6 +19951,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, size_t nfs = 0; qemuAgentFSInfoPtr *agentfsinfo = NULL; size_t i; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); @@ -19841,10 +19961,12 @@ qemuDomainGetGuestInfo(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainGetGuestInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19892,10 +20014,10 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endagentjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); if (nfs > 0) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19906,7 +20028,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuAgentFSInfoFormatParams(agentfsinfo, nfs, vm->def, params, nparams, &maxparams); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } cleanup: diff --git a/src/qemu/qemu_migration.c 
b/src/qemu/qemu_migration.c index 6b2978f745..4fa2e4cf62 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -2005,7 +2005,7 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, switch ((virMigrationJobPhase) priv->job.phase) { case VIR_MIGRATION_PHASE_BEGIN3: /* just forget we were about to migrate */ - qemuDomainObjDiscardAsyncJob(vm); + qemuDomainObjDiscardAsyncJob(vm, &priv->job); break; case VIR_MIGRATION_PHASE_PERFORM3_DONE: @@ -2015,7 +2015,7 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); /* clear the job and let higher levels decide what to do */ - qemuDomainObjDiscardAsyncJob(vm); + qemuDomainObjDiscardAsyncJob(vm, &priv->job); break; case VIR_MIGRATION_PHASE_PERFORM3: @@ -2204,6 +2204,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, unsigned long flags) { virQEMUDriverPtr driver = conn->privateData; + qemuDomainObjPrivatePtr priv = vm->privateData; char *xml = NULL; qemuDomainAsyncJob asyncJob; @@ -2213,7 +2214,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, goto cleanup; asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT; } else { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; asyncJob = QEMU_ASYNC_JOB_NONE; } @@ -2258,7 +2259,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, if (flags & VIR_MIGRATE_CHANGE_PROTECTION) qemuMigrationJobFinish(vm); else - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); goto cleanup; } @@ -2283,7 +2284,7 @@ qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver, if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) return; - qemuDomainObjDiscardAsyncJob(vm); + qemuDomainObjDiscardAsyncJob(vm, &priv->job); } static qemuProcessIncomingDefPtr @@ -5452,12 +5453,12 @@ qemuMigrationJobStart(virDomainObjPtr vm, JOB_MASK(QEMU_JOB_MIGRATION_OP); } - if (qemuDomainObjBeginAsyncJob(vm, job, op, apiFlags) < 0) + if 
(qemuDomainObjBeginAsyncJob(vm, &priv->job, job, op, apiFlags) < 0) return -1; jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; - qemuDomainObjSetAsyncJobMask(vm, mask); + qemuDomainObjSetAsyncJobMask(&priv->job, mask); return 0; } @@ -5474,7 +5475,7 @@ qemuMigrationJobSetPhase(virDomainObjPtr vm, return; } - qemuDomainObjSetJobPhase(vm, phase); + qemuDomainObjSetJobPhase(vm, &priv->job, phase); } static void @@ -5487,7 +5488,8 @@ qemuMigrationJobStartPhase(virDomainObjPtr vm, static void qemuMigrationJobContinue(virDomainObjPtr vm) { - qemuDomainObjReleaseAsyncJob(vm); + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainObjReleaseAsyncJob(&priv->job); } static bool @@ -5513,7 +5515,8 @@ qemuMigrationJobIsActive(virDomainObjPtr vm, static void qemuMigrationJobFinish(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainObjEndAsyncJob(vm, &priv->job); } diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 5eadcd1a6c..b394bcbd3f 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -422,7 +422,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, if (vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY || vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -436,7 +436,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainAuditStop(vm, "destroyed"); qemuDomainRemoveInactive(driver, vm); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } ret = 0; @@ -467,7 +467,7 @@ qemuProcessFakeReboot(void *opaque) VIR_DEBUG("vm=%p", vm); virObjectLock(vm); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -505,7 
+505,7 @@ qemuProcessFakeReboot(void *opaque) ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: priv->pausedShutdown = false; @@ -3642,9 +3642,10 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, priv->job.asyncOwnerAPI = virThreadJobGet(); priv->job.asyncStarted = now; - qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MODIFY))); + qemuDomainObjSetAsyncJobMask(&priv->job, + (QEMU_JOB_DEFAULT_MASK | + JOB_MASK(QEMU_JOB_SUSPEND) | + JOB_MASK(QEMU_JOB_MODIFY))); /* We reset the job parameters for backup so that the job will look * active. This is possible because we are able to recover the state @@ -4571,11 +4572,13 @@ qemuProcessBeginJob(virDomainObjPtr vm, virDomainJobOperation operation, unsigned long apiFlags) { - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_START, + qemuDomainObjPrivatePtr priv = vm->privateData; + + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_START, operation, apiFlags) < 0) return -1; - qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE); + qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE); return 0; } @@ -4583,7 +4586,8 @@ qemuProcessBeginJob(virDomainObjPtr vm, void qemuProcessEndJob(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainObjEndAsyncJob(vm, &priv->job); } @@ -7291,7 +7295,7 @@ qemuProcessBeginStopJob(virDomainObjPtr vm, /* Wake up anything waiting on domain condition */ virDomainObjBroadcast(vm); - if (qemuDomainObjBeginJob(vm, job) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, job) < 0) goto cleanup; ret = 0; @@ -7332,7 +7336,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); if (asyncJob != QEMU_ASYNC_JOB_NONE) { - if (qemuDomainObjBeginNestedJob(vm, asyncJob) < 0) + if (qemuDomainObjBeginNestedJob(vm, &priv->job, asyncJob) < 0) goto cleanup; } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE && 
priv->job.asyncOwner == virThreadSelfID() && @@ -7638,7 +7642,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, endjob: if (asyncJob != QEMU_ASYNC_JOB_NONE) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virErrorRestore(&orig_err); @@ -7663,7 +7667,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, if (priv->job.asyncJob) { VIR_DEBUG("vm=%s has long-term job active, cancelling", dom->def->name); - qemuDomainObjDiscardAsyncJob(dom); + qemuDomainObjDiscardAsyncJob(dom, &priv->job); } VIR_DEBUG("Killing domain"); @@ -7681,7 +7685,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, qemuDomainRemoveInactive(driver, dom); - qemuDomainObjEndJob(dom); + qemuDomainObjEndJob(dom, &priv->job); virObjectEventStateQueue(driver->domainEventState, event); } @@ -8020,14 +8024,14 @@ qemuProcessReconnect(void *opaque) g_clear_object(&data->identity); VIR_FREE(data); - qemuDomainObjRestoreJob(obj, &oldjob); + priv = obj->privateData; + qemuDomainObjRestoreJob(&priv->job, &oldjob); if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; cfg = virQEMUDriverGetConfig(driver); - priv = obj->privateData; - if (qemuDomainObjBeginJob(obj, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(obj, &priv->job, QEMU_JOB_MODIFY) < 0) goto error; jobStarted = true; @@ -8254,7 +8258,7 @@ qemuProcessReconnect(void *opaque) if (jobStarted) { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactive(driver, obj); - qemuDomainObjEndJob(obj); + qemuDomainObjEndJob(obj, &priv->job); } else { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactiveJob(driver, obj); diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 292e360193..8d216bbdbd 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -1248,16 +1248,16 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) { int freeze; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if 
(qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) { - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); goto cleanup; } freeze = qemuSnapshotFSFreeze(vm, NULL, 0); - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); if (freeze < 0) { /* the helper reported the error */ @@ -1312,7 +1312,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* allow the migration job to be cancelled or the domain to be paused */ - qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | + qemuDomainObjSetAsyncJobMask(&priv->job, (QEMU_JOB_DEFAULT_MASK | JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_MIGRATION_OP))); @@ -1342,7 +1342,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, memory_unlink = true; /* forbid any further manipulation */ - qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_DEFAULT_MASK); + qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_DEFAULT_MASK); } /* the domain is now paused if a memory snapshot was requested */ @@ -1393,7 +1393,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, } if (thaw != 0 && - qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) >= 0 && + qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) >= 0 && virDomainObjIsActive(vm)) { if (qemuSnapshotFSThaw(vm, ret == 0 && thaw > 0) < 0) { /* helper reported the error, if it was needed */ @@ -1401,7 +1401,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, ret = -1; } - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); } virQEMUSaveDataFree(data); @@ -1544,11 +1544,11 @@ qemuSnapshotCreateXML(virDomainPtr domain, * a regular job, so we need to set the job mask to disallow query as * 'savevm' blocks the monitor. External snapshot will then modify the * job mask appropriately. 
*/ - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SNAPSHOT, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SNAPSHOT, VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0) goto cleanup; - qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE); + qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE); if (redefine) { if (virDomainSnapshotRedefinePrep(vm, &def, &snap, @@ -1679,7 +1679,7 @@ qemuSnapshotCreateXML(virDomainPtr domain, virDomainSnapshotObjListRemove(vm->snapshots, snap); } - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); cleanup: return snapshot; @@ -2176,6 +2176,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, virDomainMomentObjPtr snap = NULL; virQEMUMomentRemove rem; virQEMUMomentReparent rep; + qemuDomainObjPrivatePtr priv = vm->privateData; bool metadata_only = !!(flags & VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY); int external = 0; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); @@ -2184,7 +2185,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY | VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot))) @@ -2257,7 +2258,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: return ret; -- 2.25.1

After removing all the external dependencies that `qemu_domainjob` had, we move it to `hypervisor/virdomainjob` and thus create hypervisor-agnostic jobs. This change involved moving the file, and making corresponding name changes to functions and structures. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- po/POTFILES.in | 2 +- po/libvirt.pot | 34 +- src/hypervisor/meson.build | 1 + .../virdomainjob.c} | 395 ++++++------ src/hypervisor/virdomainjob.h | 243 +++++++ src/libvirt_private.syms | 28 + src/qemu/meson.build | 1 - src/qemu/qemu_backup.c | 46 +- src/qemu/qemu_backup.h | 2 +- src/qemu/qemu_block.c | 20 +- src/qemu/qemu_block.h | 12 +- src/qemu/qemu_blockjob.c | 32 +- src/qemu/qemu_checkpoint.c | 22 +- src/qemu/qemu_domain.c | 120 ++-- src/qemu/qemu_domain.h | 12 +- src/qemu/qemu_domainjob.h | 243 ------- src/qemu/qemu_driver.c | 592 +++++++++--------- src/qemu/qemu_hotplug.c | 44 +- src/qemu/qemu_hotplug.h | 8 +- src/qemu/qemu_migration.c | 316 +++++----- src/qemu/qemu_migration.h | 8 +- src/qemu/qemu_migration_cookie.c | 2 +- src/qemu/qemu_migration_params.c | 4 +- src/qemu/qemu_process.c | 222 +++---- src/qemu/qemu_process.h | 22 +- src/qemu/qemu_saveimage.c | 4 +- src/qemu/qemu_saveimage.h | 6 +- src/qemu/qemu_snapshot.c | 76 +-- 28 files changed, 1273 insertions(+), 1244 deletions(-) rename src/{qemu/qemu_domainjob.c => hypervisor/virdomainjob.c} (60%) create mode 100644 src/hypervisor/virdomainjob.h delete mode 100644 src/qemu/qemu_domainjob.h diff --git a/po/POTFILES.in b/po/POTFILES.in index 3d6c20c55f..e844ed4006 100644 --- a/po/POTFILES.in +++ b/po/POTFILES.in @@ -81,6 +81,7 @@ @SRCDIR@src/hypervisor/domain_cgroup.c @SRCDIR@src/hypervisor/domain_driver.c @SRCDIR@src/hypervisor/virclosecallbacks.c +@SRCDIR@src/hypervisor/virdomainjob.c @SRCDIR@src/hypervisor/virhostdev.c @SRCDIR@src/interface/interface_backend_netcf.c @SRCDIR@src/interface/interface_backend_udev.c @@ -153,7 +154,6 @@ @SRCDIR@src/qemu/qemu_dbus.c @SRCDIR@src/qemu/qemu_domain.c
@SRCDIR@src/qemu/qemu_domain_address.c -@SRCDIR@src/qemu/qemu_domainjob.c @SRCDIR@src/qemu/qemu_driver.c @SRCDIR@src/qemu/qemu_extdevice.c @SRCDIR@src/qemu/qemu_firmware.c diff --git a/po/libvirt.pot b/po/libvirt.pot index 92e77bf22b..26a43959b7 100644 --- a/po/libvirt.pot +++ b/po/libvirt.pot @@ -12344,7 +12344,7 @@ msgstr "" msgid "Invalid ipv6 setting '%s' in network '%s' NAT" msgstr "" -#: src/qemu/qemu_domainjob.c:1426 +#: src/hypervisor/virdomainjob.c:784 msgid "Invalid job flags" msgstr "" @@ -23238,7 +23238,7 @@ msgstr "" msgid "Unknown architecture %s" msgstr "" -#: src/qemu/qemu_domainjob.c:1408 +#: src/hypervisor/virdomainjob.c:766 #, c-format msgid "Unknown async job type %s" msgstr "" @@ -23389,12 +23389,12 @@ msgstr "" msgid "Unknown job" msgstr "" -#: src/qemu/qemu_domainjob.c:1418 +#: src/hypervisor/virdomainjob.c:776 #, c-format msgid "Unknown job phase %s" msgstr "" -#: src/qemu/qemu_domainjob.c:1396 +#: src/hypervisor/virdomainjob.c:754 #, c-format msgid "Unknown job type %s" msgstr "" @@ -25838,50 +25838,50 @@ msgid "cannot abort migration in post-copy mode" msgstr "" #: src/libxl/libxl_domain.c:152 src/lxc/lxc_domain.c:126 -#: src/qemu/qemu_domainjob.c:1010 src/vz/vz_utils.c:623 +#: src/hypervisor/virdomainjob.c:520 src/vz/vz_utils.c:623 msgid "cannot acquire job mutex" msgstr "" #: src/libxl/libxl_domain.c:149 src/lxc/lxc_domain.c:123 -#: src/qemu/qemu_domainjob.c:980 src/vz/vz_utils.c:620 +#: src/hypervisor/virdomainjob.c:490 src/vz/vz_utils.c:620 msgid "cannot acquire state change lock" msgstr "" -#: src/qemu/qemu_domainjob.c:975 +#: src/hypervisor/virdomainjob.c:485 #, c-format msgid "cannot acquire state change lock (held by agent=%s)" msgstr "" -#: src/qemu/qemu_domainjob.c:999 +#: src/hypervisor/virdomainjob.c:509 #, c-format msgid "" "cannot acquire state change lock (held by agent=%s) due to max_queued limit" msgstr "" -#: src/qemu/qemu_domainjob.c:965 +#: src/hypervisor/virdomainjob.c:475 #, c-format msgid "cannot acquire state change 
lock (held by monitor=%s agent=%s)" msgstr "" -#: src/qemu/qemu_domainjob.c:987 +#: src/hypervisor/virdomainjob.c:497 #, c-format msgid "" "cannot acquire state change lock (held by monitor=%s agent=%s) due to " "max_queued limit" msgstr "" -#: src/qemu/qemu_domainjob.c:970 +#: src/hypervisor/virdomainjob.c:480 #, c-format msgid "cannot acquire state change lock (held by monitor=%s)" msgstr "" -#: src/qemu/qemu_domainjob.c:993 +#: src/hypervisor/virdomainjob.c:503 #, c-format msgid "" "cannot acquire state change lock (held by monitor=%s) due to max_queued limit" msgstr "" -#: src/qemu/qemu_domainjob.c:1005 +#: src/hypervisor/virdomainjob.c:514 msgid "cannot acquire state change lock due to max_queued limit" msgstr "" @@ -34693,7 +34693,7 @@ msgstr "" msgid "invalid iothreads count '%s'" msgstr "" -#: src/qemu/qemu_domainjob.c:684 +#: src/qemu/qemu_domain.c:496 msgid "invalid job statistics type" msgstr "" @@ -37902,11 +37902,11 @@ msgstr "" msgid "missing storage pool target path" msgstr "" -#: src/qemu/qemu_domainjob.c:1321 +#: src/qemu/qemu_domain.c:704 msgid "missing storage source format" msgstr "" -#: src/qemu/qemu_domainjob.c:1315 +#: src/qemu/qemu_domain.c:698 msgid "missing storage source type" msgstr "" @@ -44855,7 +44855,7 @@ msgstr "" msgid "unexpected address type for usb disk" msgstr "" -#: src/qemu/qemu_domainjob.c:1083 +#: src/hypervisor/virdomainjob.c:588 #, c-format msgid "unexpected async job %d type expected %d" msgstr "" diff --git a/src/hypervisor/meson.build b/src/hypervisor/meson.build index c81bdfa2fc..96afa0c52a 100644 --- a/src/hypervisor/meson.build +++ b/src/hypervisor/meson.build @@ -1,6 +1,7 @@ hypervisor_sources = [ 'domain_cgroup.c', 'domain_driver.c', + 'virdomainjob.c', 'virclosecallbacks.c', 'virhostdev.c', 'virmigration.c', diff --git a/src/qemu/qemu_domainjob.c b/src/hypervisor/virdomainjob.c similarity index 60% rename from src/qemu/qemu_domainjob.c rename to src/hypervisor/virdomainjob.c index ecd694958c..7de8d335e5 100644 
--- a/src/qemu/qemu_domainjob.c +++ b/src/hypervisor/virdomainjob.c @@ -1,5 +1,5 @@ /* - * qemu_domainjob.c: helper functions for QEMU domain jobs + * virdomainjob.c: helper functions for domain jobs * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -18,20 +18,21 @@ #include <config.h> -#include "qemu_migration.h" -#include "qemu_domainjob.h" +#include "domain_conf.h" +#include "virdomainjob.h" +#include "virmigration.h" #include "viralloc.h" #include "virlog.h" #include "virerror.h" #include "virtime.h" #include "virthreadjob.h" -#define VIR_FROM_THIS VIR_FROM_QEMU +#define VIR_FROM_THIS VIR_FROM_NONE -VIR_LOG_INIT("qemu.qemu_domainjob"); +VIR_LOG_INIT("util.virdomainjob"); -VIR_ENUM_IMPL(qemuDomainJob, - QEMU_JOB_LAST, +VIR_ENUM_IMPL(virDomainJob, + VIR_JOB_LAST, "none", "query", "destroy", @@ -43,15 +44,15 @@ VIR_ENUM_IMPL(qemuDomainJob, "async nested", ); -VIR_ENUM_IMPL(qemuDomainAgentJob, - QEMU_AGENT_JOB_LAST, +VIR_ENUM_IMPL(virDomainAgentJob, + VIR_AGENT_JOB_LAST, "none", "query", "modify", ); -VIR_ENUM_IMPL(qemuDomainAsyncJob, - QEMU_ASYNC_JOB_LAST, +VIR_ENUM_IMPL(virDomainAsyncJob, + VIR_ASYNC_JOB_LAST, "none", "migration out", "migration in", @@ -63,22 +64,22 @@ VIR_ENUM_IMPL(qemuDomainAsyncJob, ); const char * -qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, +virDomainAsyncJobPhaseToString(virDomainAsyncJob job, int phase G_GNUC_UNUSED) { switch (job) { - case QEMU_ASYNC_JOB_MIGRATION_OUT: - case QEMU_ASYNC_JOB_MIGRATION_IN: + case VIR_ASYNC_JOB_MIGRATION_OUT: + case VIR_ASYNC_JOB_MIGRATION_IN: return virMigrationJobPhaseTypeToString(phase); - case QEMU_ASYNC_JOB_SAVE: - case QEMU_ASYNC_JOB_DUMP: - case QEMU_ASYNC_JOB_SNAPSHOT: - case QEMU_ASYNC_JOB_START: - case QEMU_ASYNC_JOB_NONE: - case QEMU_ASYNC_JOB_BACKUP: + case VIR_ASYNC_JOB_SAVE: + case VIR_ASYNC_JOB_DUMP: + case VIR_ASYNC_JOB_SNAPSHOT: + case VIR_ASYNC_JOB_START: + case VIR_ASYNC_JOB_NONE: + case 
VIR_ASYNC_JOB_BACKUP: G_GNUC_FALLTHROUGH; - case QEMU_ASYNC_JOB_LAST: + case VIR_ASYNC_JOB_LAST: break; } @@ -86,25 +87,25 @@ qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, } int -qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, +virDomainAsyncJobPhaseFromString(virDomainAsyncJob job, const char *phase) { if (!phase) return 0; switch (job) { - case QEMU_ASYNC_JOB_MIGRATION_OUT: - case QEMU_ASYNC_JOB_MIGRATION_IN: + case VIR_ASYNC_JOB_MIGRATION_OUT: + case VIR_ASYNC_JOB_MIGRATION_IN: return virMigrationJobPhaseTypeFromString(phase); - case QEMU_ASYNC_JOB_SAVE: - case QEMU_ASYNC_JOB_DUMP: - case QEMU_ASYNC_JOB_SNAPSHOT: - case QEMU_ASYNC_JOB_START: - case QEMU_ASYNC_JOB_NONE: - case QEMU_ASYNC_JOB_BACKUP: + case VIR_ASYNC_JOB_SAVE: + case VIR_ASYNC_JOB_DUMP: + case VIR_ASYNC_JOB_SNAPSHOT: + case VIR_ASYNC_JOB_START: + case VIR_ASYNC_JOB_NONE: + case VIR_ASYNC_JOB_BACKUP: G_GNUC_FALLTHROUGH; - case QEMU_ASYNC_JOB_LAST: + case VIR_ASYNC_JOB_LAST: break; } @@ -115,8 +116,8 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, } int -qemuDomainObjInitJob(qemuDomainJobObjPtr job, - qemuDomainJobPrivateCallbacksPtr cb) +virDomainObjInitJob(virDomainJobObjPtr job, + virDomainJobPrivateCallbacksPtr cb) { memset(job, 0, sizeof(*job)); job->cb = cb; @@ -140,9 +141,9 @@ qemuDomainObjInitJob(qemuDomainJobObjPtr job, static void -qemuDomainObjResetJob(qemuDomainJobObjPtr job) +virDomainObjResetJob(virDomainJobObjPtr job) { - job->active = QEMU_JOB_NONE; + job->active = VIR_JOB_NONE; job->owner = 0; job->ownerAPI = NULL; job->started = 0; @@ -150,9 +151,9 @@ qemuDomainObjResetJob(qemuDomainJobObjPtr job) static void -qemuDomainObjResetAgentJob(qemuDomainJobObjPtr job) +virDomainObjResetAgentJob(virDomainJobObjPtr job) { - job->agentActive = QEMU_AGENT_JOB_NONE; + job->agentActive = VIR_AGENT_JOB_NONE; job->agentOwner = 0; job->agentOwnerAPI = NULL; job->agentStarted = 0; @@ -160,14 +161,14 @@ qemuDomainObjResetAgentJob(qemuDomainJobObjPtr job) static void 
-qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) +virDomainObjResetAsyncJob(virDomainJobObjPtr job) { - job->asyncJob = QEMU_ASYNC_JOB_NONE; + job->asyncJob = VIR_ASYNC_JOB_NONE; job->asyncOwner = 0; job->asyncOwnerAPI = NULL; job->asyncStarted = 0; job->phase = 0; - job->mask = QEMU_JOB_DEFAULT_MASK; + job->mask = VIR_JOB_DEFAULT_MASK; job->abortJob = false; VIR_FREE(job->error); job->cb->jobcb->resetJobPrivate(job->privateData); @@ -175,8 +176,8 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) } int -qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, - qemuDomainJobObjPtr oldJob) +virDomainObjRestoreJob(virDomainJobObjPtr job, + virDomainJobObjPtr oldJob) { memset(oldJob, 0, sizeof(*oldJob)); oldJob->active = job->active; @@ -191,32 +192,32 @@ qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, return -1; oldJob->cb = job->cb; - qemuDomainObjResetJob(job); - qemuDomainObjResetAsyncJob(job); + virDomainObjResetJob(job); + virDomainObjResetAsyncJob(job); return 0; } void -qemuDomainObjFreeJob(qemuDomainJobObjPtr job) +virDomainObjFreeJob(virDomainJobObjPtr job) { - qemuDomainObjResetJob(job); - qemuDomainObjResetAsyncJob(job); + virDomainObjResetJob(job); + virDomainObjResetAsyncJob(job); job->cb->jobcb->freeJobPrivate(job->privateData); virCondDestroy(&job->cond); virCondDestroy(&job->asyncCond); } bool -qemuDomainTrackJob(qemuDomainJob job) +virDomainTrackJob(virDomainJob job) { - return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0; + return (VIR_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0; } void -qemuDomainObjSetJobPhase(virDomainObjPtr obj, - qemuDomainJobObjPtr job, - int phase) +virDomainObjSetJobPhase(virDomainObjPtr obj, + virDomainJobObjPtr job, + int phase) { unsigned long long me = virThreadSelfID(); @@ -224,12 +225,12 @@ qemuDomainObjSetJobPhase(virDomainObjPtr obj, return; VIR_DEBUG("Setting '%s' phase to '%s'", - qemuDomainAsyncJobTypeToString(job->asyncJob), - qemuDomainAsyncJobPhaseToString(job->asyncJob, phase)); + 
virDomainAsyncJobTypeToString(job->asyncJob), + virDomainAsyncJobPhaseToString(job->asyncJob, phase)); if (job->asyncOwner && me != job->asyncOwner) { VIR_WARN("'%s' async job is owned by thread %llu", - qemuDomainAsyncJobTypeToString(job->asyncJob), + virDomainAsyncJobTypeToString(job->asyncJob), job->asyncOwner); } @@ -239,77 +240,77 @@ qemuDomainObjSetJobPhase(virDomainObjPtr obj, } void -qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job, - unsigned long long allowedJobs) +virDomainObjSetAsyncJobMask(virDomainJobObjPtr job, + unsigned long long allowedJobs) { if (!job->asyncJob) return; - job->mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY); + job->mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY); } void -qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job) +virDomainObjDiscardAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr job) { - if (job->active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjResetJob(job); - qemuDomainObjResetAsyncJob(job); + if (job->active == VIR_JOB_ASYNC_NESTED) + virDomainObjResetJob(job); + virDomainObjResetAsyncJob(job); job->cb->saveStatus(obj); } void -qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job) +virDomainObjReleaseAsyncJob(virDomainJobObjPtr job) { VIR_DEBUG("Releasing ownership of '%s' async job", - qemuDomainAsyncJobTypeToString(job->asyncJob)); + virDomainAsyncJobTypeToString(job->asyncJob)); if (job->asyncOwner != virThreadSelfID()) { VIR_WARN("'%s' async job is owned by thread %llu", - qemuDomainAsyncJobTypeToString(job->asyncJob), + virDomainAsyncJobTypeToString(job->asyncJob), job->asyncOwner); } job->asyncOwner = 0; } static bool -qemuDomainNestedJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob) +virDomainNestedJobAllowed(virDomainJobObjPtr jobs, virDomainJob newJob) { return !jobs->asyncJob || - newJob == QEMU_JOB_NONE || + newJob == VIR_JOB_NONE || (jobs->mask & JOB_MASK(newJob)) != 0; } bool -qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob) 
+virDomainJobAllowed(virDomainJobObjPtr jobs, virDomainJob newJob) { - return !jobs->active && qemuDomainNestedJobAllowed(jobs, newJob); + return !jobs->active && virDomainNestedJobAllowed(jobs, newJob); } static bool -qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, - qemuDomainJob newJob, - qemuDomainAgentJob newAgentJob) +virDomainObjCanSetJob(virDomainJobObjPtr job, + virDomainJob newJob, + virDomainAgentJob newAgentJob) { - return ((newJob == QEMU_JOB_NONE || - job->active == QEMU_JOB_NONE) && - (newAgentJob == QEMU_AGENT_JOB_NONE || - job->agentActive == QEMU_AGENT_JOB_NONE)); + return ((newJob == VIR_JOB_NONE || + job->active == VIR_JOB_NONE) && + (newAgentJob == VIR_AGENT_JOB_NONE || + job->agentActive == VIR_AGENT_JOB_NONE)); } /* Give up waiting for mutex after 30 seconds */ -#define QEMU_JOB_WAIT_TIME (1000ull * 30) +#define VIR_JOB_WAIT_TIME (1000ull * 30) /** - * qemuDomainObjBeginJobInternal: + * virDomainObjBeginJobInternal: * @obj: domain object - * @job: qemuDomainJob to start - * @asyncJob: qemuDomainAsyncJob to start + * @job: virDomainJob to start + * @asyncJob: virDomainAsyncJob to start * @nowait: don't wait trying to acquire @job * * Acquires job for a domain object which must be locked before * calling. If there's already a job running waits up to - * QEMU_JOB_WAIT_TIME after which the functions fails reporting + * VIR_JOB_WAIT_TIME after which the functions fails reporting * an error unless @nowait is set. * * If @nowait is true this function tries to acquire job and if @@ -322,17 +323,17 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, * -1 otherwise. 
*/ static int ATTRIBUTE_NONNULL(1) -qemuDomainObjBeginJobInternal(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainJob job, - qemuDomainAgentJob agentJob, - qemuDomainAsyncJob asyncJob, - bool nowait) +virDomainObjBeginJobInternal(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainJob job, + virDomainAgentJob agentJob, + virDomainAsyncJob asyncJob, + bool nowait) { unsigned long long now; unsigned long long then; - bool nested = job == QEMU_JOB_ASYNC_NESTED; - bool async = job == QEMU_JOB_ASYNC; + bool nested = job == VIR_JOB_ASYNC_NESTED; + bool async = job == VIR_JOB_ASYNC; const char *blocker = NULL; const char *agentBlocker = NULL; int ret = -1; @@ -342,28 +343,28 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, VIR_DEBUG("Starting job: job=%s agentJob=%s asyncJob=%s " "(vm=%p name=%s, current job=%s agentJob=%s async=%s)", - qemuDomainJobTypeToString(job), - qemuDomainAgentJobTypeToString(agentJob), - qemuDomainAsyncJobTypeToString(asyncJob), + virDomainJobTypeToString(job), + virDomainAgentJobTypeToString(agentJob), + virDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name, - qemuDomainJobTypeToString(jobObj->active), - qemuDomainAgentJobTypeToString(jobObj->agentActive), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); + virDomainJobTypeToString(jobObj->active), + virDomainAgentJobTypeToString(jobObj->agentActive), + virDomainAsyncJobTypeToString(jobObj->asyncJob)); if (virTimeMillisNow(&now) < 0) return -1; jobObj->cb->jobcb->increaseJobsQueued(obj); - then = now + QEMU_JOB_WAIT_TIME; + then = now + VIR_JOB_WAIT_TIME; retry: - if ((!async && job != QEMU_JOB_DESTROY) && + if ((!async && job != VIR_JOB_DESTROY) && jobObj->cb->jobcb->getMaxQueuedJobs(obj) && jobObj->cb->jobcb->getJobsQueued(obj) > jobObj->cb->jobcb->getMaxQueuedJobs(obj)) { goto error; } - while (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) { + while (!nested && !virDomainNestedJobAllowed(jobObj, job)) { if (nowait) goto cleanup; @@ -372,7 +373,7 
@@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, goto error; } - while (!qemuDomainObjCanSetJob(jobObj, job, agentJob)) { + while (!virDomainObjCanSetJob(jobObj, job, agentJob)) { if (nowait) goto cleanup; @@ -383,18 +384,18 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, /* No job is active but a new async job could have been started while obj * was unlocked, so we need to recheck it. */ - if (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) + if (!nested && !virDomainNestedJobAllowed(jobObj, job)) goto retry; ignore_value(virTimeMillisNow(&now)); if (job) { - qemuDomainObjResetJob(jobObj); + virDomainObjResetJob(jobObj); - if (job != QEMU_JOB_ASYNC) { + if (job != VIR_JOB_ASYNC) { VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)", - qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + virDomainJobTypeToString(job), + virDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); jobObj->active = job; jobObj->owner = virThreadSelfID(); @@ -402,9 +403,9 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, jobObj->started = now; } else { VIR_DEBUG("Started async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(asyncJob), + virDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); - qemuDomainObjResetAsyncJob(jobObj); + virDomainObjResetAsyncJob(jobObj); jobObj->cb->jobcb->currentJobInfoInit(jobObj, now); jobObj->asyncJob = asyncJob; jobObj->asyncOwner = virThreadSelfID(); @@ -414,20 +415,20 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, } if (agentJob) { - qemuDomainObjResetAgentJob(jobObj); + virDomainObjResetAgentJob(jobObj); VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)", - qemuDomainAgentJobTypeToString(agentJob), + virDomainAgentJobTypeToString(agentJob), obj, obj->def->name, - qemuDomainJobTypeToString(jobObj->active), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); + virDomainJobTypeToString(jobObj->active), + 
virDomainAsyncJobTypeToString(jobObj->asyncJob)); jobObj->agentActive = agentJob; jobObj->agentOwner = virThreadSelfID(); jobObj->agentOwnerAPI = virThreadJobGet(); jobObj->agentStarted = now; } - if (qemuDomainTrackJob(job)) + if (virDomainTrackJob(job)) jobObj->cb->saveStatus(obj); return 0; @@ -445,13 +446,13 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, "current job is (%s, %s, %s) " "owned by (%llu %s, %llu %s, %llu %s (flags=0x%lx)) " "for (%llus, %llus, %llus)", - qemuDomainJobTypeToString(job), - qemuDomainAgentJobTypeToString(agentJob), - qemuDomainAsyncJobTypeToString(asyncJob), + virDomainJobTypeToString(job), + virDomainAgentJobTypeToString(agentJob), + virDomainAsyncJobTypeToString(asyncJob), obj->def->name, - qemuDomainJobTypeToString(jobObj->active), - qemuDomainAgentJobTypeToString(jobObj->agentActive), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + virDomainJobTypeToString(jobObj->active), + virDomainAgentJobTypeToString(jobObj->agentActive), + virDomainAsyncJobTypeToString(jobObj->asyncJob), jobObj->owner, NULLSTR(jobObj->ownerAPI), jobObj->agentOwner, NULLSTR(jobObj->agentOwnerAPI), jobObj->asyncOwner, NULLSTR(jobObj->asyncOwnerAPI), @@ -459,7 +460,7 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, duration / 1000, agentDuration / 1000, asyncDuration / 1000); if (job) { - if (nested || qemuDomainNestedJobAllowed(jobObj, job)) + if (nested || virDomainNestedJobAllowed(jobObj, job)) blocker = jobObj->ownerAPI; else blocker = jobObj->asyncOwnerAPI; @@ -528,48 +529,48 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, * obj must be locked before calling * * This must be called by anything that will change the VM state - * in any way, or anything that will use the QEMU monitor. + * in any way, or anything that will use the (QEMU) monitor. 
* * Successful calls must be followed by EndJob eventually */ -int qemuDomainObjBeginJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainJob job) +int virDomainObjBeginJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainJob job) { - if (qemuDomainObjBeginJobInternal(obj, jobObj, job, - QEMU_AGENT_JOB_NONE, - QEMU_ASYNC_JOB_NONE, false) < 0) + if (virDomainObjBeginJobInternal(obj, jobObj, job, + VIR_AGENT_JOB_NONE, + VIR_ASYNC_JOB_NONE, false) < 0) return -1; else return 0; } /** - * qemuDomainObjBeginAgentJob: + * virDomainObjBeginAgentJob: * * Grabs agent type of job. Use if caller talks to guest agent only. * - * To end job call qemuDomainObjEndAgentJob. + * To end job call virDomainObjEndAgentJob. */ int -qemuDomainObjBeginAgentJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainAgentJob agentJob) +virDomainObjBeginAgentJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainAgentJob agentJob) { - return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_NONE, - agentJob, - QEMU_ASYNC_JOB_NONE, false); + return virDomainObjBeginJobInternal(obj, jobObj, VIR_JOB_NONE, + agentJob, + VIR_ASYNC_JOB_NONE, false); } -int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainAsyncJob asyncJob, - virDomainJobOperation operation, - unsigned long apiFlags) +int virDomainObjBeginAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainAsyncJob asyncJob, + virDomainJobOperation operation, + unsigned long apiFlags) { - if (qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC, - QEMU_AGENT_JOB_NONE, - asyncJob, false) < 0) + if (virDomainObjBeginJobInternal(obj, jobObj, VIR_JOB_ASYNC, + VIR_AGENT_JOB_NONE, + asyncJob, false) < 0) return -1; jobObj->cb->jobcb->setJobInfoOperation(jobObj, operation); @@ -578,9 +579,9 @@ int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, } int -qemuDomainObjBeginNestedJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - 
qemuDomainAsyncJob asyncJob) +virDomainObjBeginNestedJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainAsyncJob asyncJob) { if (asyncJob != jobObj->asyncJob) { virReportError(VIR_ERR_INTERNAL_ERROR, @@ -594,56 +595,56 @@ qemuDomainObjBeginNestedJob(virDomainObjPtr obj, jobObj->asyncOwner); } - return qemuDomainObjBeginJobInternal(obj, jobObj, - QEMU_JOB_ASYNC_NESTED, - QEMU_AGENT_JOB_NONE, - QEMU_ASYNC_JOB_NONE, + return virDomainObjBeginJobInternal(obj, jobObj, + VIR_JOB_ASYNC_NESTED, + VIR_AGENT_JOB_NONE, + VIR_ASYNC_JOB_NONE, false); } /** - * qemuDomainObjBeginJobNowait: + * virDomainObjBeginJobNowait: * * @obj: domain object - * @jobObj: qemuDomainJobObjPtr - * @job: qemuDomainJob to start + * @jobObj: virDomainJobObjPtr + * @job: virDomainJob to start * * Acquires job for a domain object which must be locked before * calling. If there's already a job running it returns * immediately without any error reported. * - * Returns: see qemuDomainObjBeginJobInternal + * Returns: see virDomainObjBeginJobInternal */ int -qemuDomainObjBeginJobNowait(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainJob job) +virDomainObjBeginJobNowait(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainJob job) { - return qemuDomainObjBeginJobInternal(obj, jobObj, job, - QEMU_AGENT_JOB_NONE, - QEMU_ASYNC_JOB_NONE, true); + return virDomainObjBeginJobInternal(obj, jobObj, job, + VIR_AGENT_JOB_NONE, + VIR_ASYNC_JOB_NONE, true); } /* * obj must be locked and have a reference before calling * * To be called after completing the work associated with the - * earlier qemuDomainBeginJob() call + * earlier virDomainBeginJob() call */ void -qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj) +virDomainObjEndJob(virDomainObjPtr obj, virDomainJobObjPtr jobObj) { - qemuDomainJob job = jobObj->active; + virDomainJob job = jobObj->active; jobObj->cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)", - 
qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + virDomainJobTypeToString(job), + virDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetJob(jobObj); - if (qemuDomainTrackJob(job)) + virDomainObjResetJob(jobObj); + if (virDomainTrackJob(job)) jobObj->cb->saveStatus(obj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. */ @@ -651,45 +652,45 @@ qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj) } void -qemuDomainObjEndAgentJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj) +virDomainObjEndAgentJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj) { - qemuDomainAgentJob agentJob = jobObj->agentActive; + virDomainAgentJob agentJob = jobObj->agentActive; jobObj->cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)", - qemuDomainAgentJobTypeToString(agentJob), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + virDomainAgentJobTypeToString(agentJob), + virDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetAgentJob(jobObj); + virDomainObjResetAgentJob(jobObj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. 
*/ virCondBroadcast(&jobObj->cond); } void -qemuDomainObjEndAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj) +virDomainObjEndAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj) { jobObj->cb->jobcb->decreaseJobsQueued(obj); VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + virDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetAsyncJob(jobObj); + virDomainObjResetAsyncJob(jobObj); jobObj->cb->saveStatus(obj); virCondBroadcast(&jobObj->asyncCond); } void -qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job) +virDomainObjAbortAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr job) { VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(job->asyncJob), + virDomainAsyncJobTypeToString(job->asyncJob), obj, obj->def->name); job->abortJob = true; @@ -697,32 +698,32 @@ qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, } int -qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm, - qemuDomainJobObjPtr jobObj) +virDomainObjPrivateXMLFormatJob(virBufferPtr buf, + virDomainObjPtr vm, + virDomainJobObjPtr jobObj) { g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); - qemuDomainJob job = jobObj->active; + virDomainJob job = jobObj->active; - if (!qemuDomainTrackJob(job)) - job = QEMU_JOB_NONE; + if (!virDomainTrackJob(job)) + job = VIR_JOB_NONE; - if (job == QEMU_JOB_NONE && - jobObj->asyncJob == QEMU_ASYNC_JOB_NONE) + if (job == VIR_JOB_NONE && + jobObj->asyncJob == VIR_ASYNC_JOB_NONE) return 0; virBufferAsprintf(&attrBuf, " type='%s' async='%s'", - qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); + virDomainJobTypeToString(job), + virDomainAsyncJobTypeToString(jobObj->asyncJob)); if (jobObj->phase) { virBufferAsprintf(&attrBuf, " phase='%s'", - qemuDomainAsyncJobPhaseToString(jobObj->asyncJob, + 
virDomainAsyncJobPhaseToString(jobObj->asyncJob, jobObj->phase)); } - if (jobObj->asyncJob != QEMU_ASYNC_JOB_NONE) + if (jobObj->asyncJob != VIR_ASYNC_JOB_NONE) virBufferAsprintf(&attrBuf, " flags='0x%lx'", jobObj->apiFlags); if (jobObj->cb->jobcb->formatJob(&childBuf, jobObj, vm) < 0) @@ -735,9 +736,9 @@ qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, int -qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - xmlXPathContextPtr ctxt, - qemuDomainJobObjPtr job) +virDomainObjPrivateXMLParseJob(virDomainObjPtr vm, + xmlXPathContextPtr ctxt, + virDomainJobObjPtr job) { VIR_XPATH_NODE_AUTORESTORE(ctxt) g_autofree char *tmp = NULL; @@ -748,7 +749,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, if ((tmp = virXPathString("string(@type)", ctxt))) { int type; - if ((type = qemuDomainJobTypeFromString(tmp)) < 0) { + if ((type = virDomainJobTypeFromString(tmp)) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Unknown job type %s"), tmp); return -1; @@ -760,7 +761,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, if ((tmp = virXPathString("string(@async)", ctxt))) { int async; - if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) { + if ((async = virDomainAsyncJobTypeFromString(tmp)) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Unknown async job type %s"), tmp); return -1; @@ -769,7 +770,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, job->asyncJob = async; if ((tmp = virXPathString("string(@phase)", ctxt))) { - job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp); + job->phase = virDomainAsyncJobPhaseFromString(async, tmp); if (job->phase < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Unknown job phase %s"), tmp); diff --git a/src/hypervisor/virdomainjob.h b/src/hypervisor/virdomainjob.h new file mode 100644 index 0000000000..0c3265aeb1 --- /dev/null +++ b/src/hypervisor/virdomainjob.h @@ -0,0 +1,243 @@ +/* + * virdomainjob.h: helper functions for domain jobs + * + * This library is free software; you can redistribute it and/or + * 
modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. If not, see + * <http://www.gnu.org/licenses/>. + */ + +#pragma once + +#include <glib-object.h> + +#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1)) +#define VIR_JOB_DEFAULT_MASK \ + (JOB_MASK(VIR_JOB_QUERY) | \ + JOB_MASK(VIR_JOB_DESTROY) | \ + JOB_MASK(VIR_JOB_ABORT)) + +/* Jobs which have to be tracked in domain state XML. */ +#define VIR_DOMAIN_TRACK_JOBS \ + (JOB_MASK(VIR_JOB_DESTROY) | \ + JOB_MASK(VIR_JOB_ASYNC)) + +/* Only 1 job is allowed at any time + * A job includes *all* monitor commands, even those just querying + * information, not merely actions */ +typedef enum { + VIR_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */ + VIR_JOB_QUERY, /* Doesn't change any state */ + VIR_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */ + VIR_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */ + VIR_JOB_MODIFY, /* May change state */ + VIR_JOB_ABORT, /* Abort current async job */ + VIR_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */ + + /* The following two items must always be the last items before JOB_LAST */ + VIR_JOB_ASYNC, /* Asynchronous job */ + VIR_JOB_ASYNC_NESTED, /* Normal job within an async job */ + + VIR_JOB_LAST +} virDomainJob; +VIR_ENUM_DECL(virDomainJob); + +typedef enum { + VIR_AGENT_JOB_NONE = 0, /* No agent job. 
*/ + VIR_AGENT_JOB_QUERY, /* Does not change state of domain */ + VIR_AGENT_JOB_MODIFY, /* May change state of domain */ + + VIR_AGENT_JOB_LAST +} virDomainAgentJob; +VIR_ENUM_DECL(virDomainAgentJob); + +/* Async job consists of a series of jobs that may change state. Independent + * jobs that do not change state (and possibly others if explicitly allowed by + * current async job) are allowed to be run even if async job is active. + */ +typedef enum { + VIR_ASYNC_JOB_NONE = 0, + VIR_ASYNC_JOB_MIGRATION_OUT, + VIR_ASYNC_JOB_MIGRATION_IN, + VIR_ASYNC_JOB_SAVE, + VIR_ASYNC_JOB_DUMP, + VIR_ASYNC_JOB_SNAPSHOT, + VIR_ASYNC_JOB_START, + VIR_ASYNC_JOB_BACKUP, + + VIR_ASYNC_JOB_LAST +} virDomainAsyncJob; +VIR_ENUM_DECL(virDomainAsyncJob); + +typedef enum { + VIR_DOMAIN_JOB_STATUS_NONE = 0, + VIR_DOMAIN_JOB_STATUS_ACTIVE, + VIR_DOMAIN_JOB_STATUS_MIGRATING, + VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED, + VIR_DOMAIN_JOB_STATUS_PAUSED, + VIR_DOMAIN_JOB_STATUS_POSTCOPY, + VIR_DOMAIN_JOB_STATUS_COMPLETED, + VIR_DOMAIN_JOB_STATUS_FAILED, + VIR_DOMAIN_JOB_STATUS_CANCELED, +} virDomainJobStatus; + +typedef enum { + VIR_DOMAIN_JOB_STATS_TYPE_NONE = 0, + VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION, + VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP, + VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP, + VIR_DOMAIN_JOB_STATS_TYPE_BACKUP, +} virDomainJobStatsType; + +typedef struct _virDomainJobObj virDomainJobObj; +typedef virDomainJobObj *virDomainJobObjPtr; + +typedef void *(*virDomainObjPrivateJobAlloc)(void); +typedef void (*virDomainObjPrivateJobFree)(void *); +typedef void (*virDomainObjPrivateJobReset)(void *); +typedef void (*virDomainObjPrivateSaveStatus)(virDomainObjPtr); +typedef int (*virDomainObjPrivateJobFormat)(virBufferPtr, + virDomainJobObjPtr, + virDomainObjPtr); +typedef int (*virDomainObjPrivateJobParse)(xmlXPathContextPtr, virDomainJobObjPtr, + virDomainObjPtr); +typedef void (*virDomainObjJobInfoSetOperation)(virDomainJobObjPtr, + virDomainJobOperation); +typedef void 
(*virDomainObjCurrentJobInfoInit)(virDomainJobObjPtr, + unsigned long long); +typedef int (*virDomainObjGetJobsQueued)(virDomainObjPtr); +typedef void (*virDomainObjIncreaseJobsQueued)(virDomainObjPtr); +typedef void (*virDomainObjDecreaseJobsQueued)(virDomainObjPtr); +typedef int (*virDomainObjGetMaxQueuedJobs)(virDomainObjPtr); + +typedef struct _virDomainJobPrivateJobCallbacks virDomainJobPrivateJobCallbacks; +typedef virDomainJobPrivateJobCallbacks *virDomainJobPrivateJobCallbacksPtr; +struct _virDomainJobPrivateJobCallbacks { + virDomainObjPrivateJobAlloc allocJobPrivate; + virDomainObjPrivateJobFree freeJobPrivate; + virDomainObjPrivateJobReset resetJobPrivate; + virDomainObjPrivateJobFormat formatJob; + virDomainObjPrivateJobParse parseJob; + virDomainObjJobInfoSetOperation setJobInfoOperation; + virDomainObjCurrentJobInfoInit currentJobInfoInit; + virDomainObjGetJobsQueued getJobsQueued; + virDomainObjIncreaseJobsQueued increaseJobsQueued; + virDomainObjDecreaseJobsQueued decreaseJobsQueued; + virDomainObjGetMaxQueuedJobs getMaxQueuedJobs; +}; + +typedef struct _virDomainJobPrivateCallbacks virDomainJobPrivateCallbacks; +typedef virDomainJobPrivateCallbacks *virDomainJobPrivateCallbacksPtr; +struct _virDomainJobPrivateCallbacks { + /* generic callbacks that we can't really categorize */ + virDomainObjPrivateSaveStatus saveStatus; + + /* Job related callbacks */ + virDomainJobPrivateJobCallbacksPtr jobcb; +}; + +struct _virDomainJobObj { + virCond cond; /* Use to coordinate jobs */ + + /* The following members are for VIR_JOB_* */ + virDomainJob active; /* Currently running job */ + unsigned long long owner; /* Thread id which set current job */ + const char *ownerAPI; /* The API which owns the job */ + unsigned long long started; /* When the current job started */ + + /* The following members are for VIR_AGENT_JOB_* */ + virDomainAgentJob agentActive; /* Currently running agent job */ + unsigned long long agentOwner; /* Thread id which set current agent job 
*/ + const char *agentOwnerAPI; /* The API which owns the agent job */ + unsigned long long agentStarted; /* When the current agent job started */ + + /* The following members are for VIR_ASYNC_JOB_* */ + virCond asyncCond; /* Use to coordinate with async jobs */ + virDomainAsyncJob asyncJob; /* Currently active async job */ + unsigned long long asyncOwner; /* Thread which set current async job */ + const char *asyncOwnerAPI; /* The API which owns the async job */ + unsigned long long asyncStarted; /* When the current async job started */ + int phase; /* Job phase (mainly for migrations) */ + unsigned long long mask; /* Jobs allowed during async job */ + bool abortJob; /* abort of the job requested */ + char *error; /* job event completion error */ + unsigned long apiFlags; /* flags passed to the API which started the async job */ + + void *privateData; /* job specific collection of data */ + virDomainJobPrivateCallbacksPtr cb; +}; + +const char *virDomainAsyncJobPhaseToString(virDomainAsyncJob job, + int phase); +int virDomainAsyncJobPhaseFromString(virDomainAsyncJob job, + const char *phase); + +int virDomainObjBeginJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainJob job) + G_GNUC_WARN_UNUSED_RESULT; +int virDomainObjBeginAgentJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainAgentJob agentJob) + G_GNUC_WARN_UNUSED_RESULT; +int virDomainObjBeginAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainAsyncJob asyncJob, + virDomainJobOperation operation, + unsigned long apiFlags) + G_GNUC_WARN_UNUSED_RESULT; +int virDomainObjBeginNestedJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainAsyncJob asyncJob) + G_GNUC_WARN_UNUSED_RESULT; +int virDomainObjBeginJobNowait(virDomainObjPtr obj, + virDomainJobObjPtr jobObj, + virDomainJob job) + G_GNUC_WARN_UNUSED_RESULT; + +void virDomainObjEndJob(virDomainObjPtr obj, virDomainJobObjPtr jobObj); +void virDomainObjEndAgentJob(virDomainObjPtr obj, + virDomainJobObjPtr 
jobObj); +void virDomainObjEndAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr jobObj); +void virDomainObjAbortAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr job); +void virDomainObjSetJobPhase(virDomainObjPtr obj, + virDomainJobObjPtr job, + int phase); +void virDomainObjSetAsyncJobMask(virDomainJobObjPtr job, + unsigned long long allowedJobs); +int virDomainObjRestoreJob(virDomainJobObjPtr job, + virDomainJobObjPtr oldJob); +void virDomainObjDiscardAsyncJob(virDomainObjPtr obj, + virDomainJobObjPtr job); +void virDomainObjReleaseAsyncJob(virDomainJobObjPtr job); + +bool virDomainTrackJob(virDomainJob job); + +void virDomainObjFreeJob(virDomainJobObjPtr job); + +int +virDomainObjInitJob(virDomainJobObjPtr job, + virDomainJobPrivateCallbacksPtr cb); + +bool virDomainJobAllowed(virDomainJobObjPtr jobs, virDomainJob newJob); + +int +virDomainObjPrivateXMLFormatJob(virBufferPtr buf, + virDomainObjPtr vm, + virDomainJobObjPtr jobObj); + +int +virDomainObjPrivateXMLParseJob(virDomainObjPtr vm, + xmlXPathContextPtr ctxt, + virDomainJobObjPtr job); diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index c7adf16aba..d3b8833844 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -1447,6 +1447,34 @@ virCloseCallbacksSet; virCloseCallbacksUnset; +# hypervisor/virdomainjob.h +virDomainAsyncJobPhaseFromString; +virDomainAsyncJobPhaseToString; +virDomainAsyncJobTypeFromString; +virDomainAsyncJobTypeToString; +virDomainJobAllowed; +virDomainJobTypeFromString; +virDomainJobTypeToString; +virDomainObjAbortAsyncJob; +virDomainObjBeginAgentJob; +virDomainObjBeginAsyncJob; +virDomainObjBeginJob; +virDomainObjBeginJobNowait; +virDomainObjBeginNestedJob; +virDomainObjDiscardAsyncJob; +virDomainObjEndAgentJob; +virDomainObjEndAsyncJob; +virDomainObjEndJob; +virDomainObjFreeJob; +virDomainObjInitJob; +virDomainObjPrivateXMLFormatJob; +virDomainObjPrivateXMLParseJob; +virDomainObjReleaseAsyncJob; +virDomainObjRestoreJob; 
+virDomainObjSetAsyncJobMask; +virDomainObjSetJobPhase; + + # hypervisor/virhostdev.h virHostdevFindUSBDevice; virHostdevManagerGetDefault; diff --git a/src/qemu/meson.build b/src/qemu/meson.build index 4e599d1e69..1be0da010b 100644 --- a/src/qemu/meson.build +++ b/src/qemu/meson.build @@ -12,7 +12,6 @@ qemu_driver_sources = [ 'qemu_dbus.c', 'qemu_domain.c', 'qemu_domain_address.c', - 'qemu_domainjob.c', 'qemu_driver.c', 'qemu_extdevice.c', 'qemu_firmware.c', diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index 4e606c252f..7d951d7786 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -436,10 +436,10 @@ qemuBackupDiskPrepareOneStorage(virDomainObjPtr vm, if (qemuBlockStorageSourceCreate(vm, dd->store, dd->backingStore, NULL, dd->crdata->srcdata[0], - QEMU_ASYNC_JOB_BACKUP) < 0) + VIR_ASYNC_JOB_BACKUP) < 0) return -1; } else { - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) < 0) return -1; rc = qemuBlockStorageSourceAttachApply(priv->mon, dd->crdata->srcdata[0]); @@ -525,7 +525,7 @@ qemuBackupBeginPullExportDisks(virDomainObjPtr vm, void qemuBackupJobTerminate(virDomainObjPtr vm, - qemuDomainJobStatus jobstatus) + virDomainJobStatus jobstatus) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -550,7 +550,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, if (!(priv->job.apiFlags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL) && (priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL || (priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH && - jobstatus != QEMU_DOMAIN_JOB_STATUS_COMPLETED))) { + jobstatus != VIR_DOMAIN_JOB_STATUS_COMPLETED))) { g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); @@ -572,7 +572,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, virDomainBackupDefFree(priv->backup); priv->backup = NULL; - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); } @@ -640,7 +640,7 @@ 
qemuBackupJobCancelBlockjobs(virDomainObjPtr vm, } if (terminatebackup && !has_active) - qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED); + qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED); } @@ -740,15 +740,15 @@ qemuBackupBegin(virDomainObjPtr vm, * infrastructure for async jobs. We'll allow standard modify-type jobs * as the interlocking of conflicting operations is handled on the block * job level */ - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_BACKUP, + if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_BACKUP, VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0) return -1; - qemuDomainObjSetAsyncJobMask(&priv->job, - (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MODIFY))); - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + virDomainObjSetAsyncJobMask(&priv->job, + (VIR_JOB_DEFAULT_MASK | + JOB_MASK(VIR_JOB_SUSPEND) | + JOB_MASK(VIR_JOB_MODIFY))); + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_BACKUP; if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", @@ -787,7 +787,7 @@ qemuBackupBegin(virDomainObjPtr vm, goto endjob; } - if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_BACKUP))) + if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_BACKUP))) goto endjob; if ((ndd = qemuBackupDiskPrepareData(vm, def, blockNamedNodeData, actions, @@ -805,7 +805,7 @@ qemuBackupBegin(virDomainObjPtr vm, priv->backup = g_steal_pointer(&def); - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) < 0) goto endjob; /* TODO: TLS is a must-have for the modern age */ @@ -838,7 +838,7 @@ qemuBackupBegin(virDomainObjPtr vm, } if (pull) { - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) < 0) goto endjob; /* note that if the export fails we've already 
created the checkpoint * and we will not delete it */ @@ -847,7 +847,7 @@ qemuBackupBegin(virDomainObjPtr vm, goto endjob; if (rc < 0) { - qemuBackupJobCancelBlockjobs(vm, priv->backup, false, QEMU_ASYNC_JOB_BACKUP); + qemuBackupJobCancelBlockjobs(vm, priv->backup, false, VIR_ASYNC_JOB_BACKUP); goto endjob; } } @@ -864,7 +864,7 @@ qemuBackupBegin(virDomainObjPtr vm, qemuCheckpointRollbackMetadata(vm, chk); if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) && - qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) == 0) { + qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_BACKUP) == 0) { if (nbd_running) ignore_value(qemuMonitorNBDServerStop(priv->mon)); if (tlsAlias) @@ -878,9 +878,9 @@ qemuBackupBegin(virDomainObjPtr vm, def = g_steal_pointer(&priv->backup); if (ret == 0) - qemuDomainObjReleaseAsyncJob(&priv->job); + virDomainObjReleaseAsyncJob(&priv->job); else - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); return ret; } @@ -919,7 +919,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm, bool has_cancelling = false; bool has_cancelled = false; bool has_failed = false; - qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + virDomainJobStatus jobstatus = VIR_DOMAIN_JOB_STATUS_COMPLETED; virDomainBackupDefPtr backup = priv->backup; size_t i; @@ -1017,9 +1017,9 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm, /* all sub-jobs have stopped */ if (has_failed) - jobstatus = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobstatus = VIR_DOMAIN_JOB_STATUS_FAILED; else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH) - jobstatus = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobstatus = VIR_DOMAIN_JOB_STATUS_CANCELED; qemuBackupJobTerminate(vm, jobstatus); } @@ -1088,7 +1088,7 @@ qemuBackupGetJobInfoStats(virDomainObjPtr vm, if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) return -1; - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; 
qemuDomainObjEnterMonitor(vm); diff --git a/src/qemu/qemu_backup.h b/src/qemu/qemu_backup.h index 9925fddbf9..6cd1797cae 100644 --- a/src/qemu/qemu_backup.h +++ b/src/qemu/qemu_backup.h @@ -45,7 +45,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm, void qemuBackupJobTerminate(virDomainObjPtr vm, - qemuDomainJobStatus jobstatus); + virDomainJobStatus jobstatus); int qemuBackupGetJobInfoStats(virDomainObjPtr vm, diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c index 23b60e73ec..c2f3cacbf2 100644 --- a/src/qemu/qemu_block.c +++ b/src/qemu/qemu_block.c @@ -321,7 +321,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk, int qemuBlockNodeNamesDetect(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virHashTable) disktable = NULL; @@ -1985,7 +1985,7 @@ qemuBlockStorageSourceChainDetach(qemuMonitorPtr mon, */ int qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virStorageSourcePtr src) { int ret; @@ -2545,7 +2545,7 @@ qemuBlockStorageSourceCreateGeneric(virDomainObjPtr vm, virStorageSourcePtr src, virStorageSourcePtr chain, bool storageCreate, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { g_autoptr(virJSONValue) props = createProps; qemuDomainObjPrivatePtr priv = vm->privateData; @@ -2600,7 +2600,7 @@ static int qemuBlockStorageSourceCreateStorage(virDomainObjPtr vm, virStorageSourcePtr src, virStorageSourcePtr chain, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { int actualType = virStorageSourceGetActualType(src); g_autoptr(virJSONValue) createstorageprops = NULL; @@ -2637,7 +2637,7 @@ qemuBlockStorageSourceCreateFormat(virDomainObjPtr vm, virStorageSourcePtr src, virStorageSourcePtr backingStore, virStorageSourcePtr chain, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { g_autoptr(virJSONValue) createformatprops = NULL; int ret; @@ -2687,7 +2687,7 
@@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, virStorageSourcePtr backingStore, virStorageSourcePtr chain, qemuBlockStorageSourceAttachDataPtr data, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; @@ -2855,7 +2855,7 @@ qemuBlockNamedNodeDataGetBitmapByName(virHashTablePtr blockNamedNodeData, virHashTablePtr qemuBlockGetNamedNodeData(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virHashTable) blockNamedNodeData = NULL; @@ -3178,7 +3178,7 @@ qemuBlockBitmapsHandleCommitFinish(virStorageSourcePtr topsrc, static int qemuBlockReopenFormat(virDomainObjPtr vm, virStorageSourcePtr src, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virJSONValue) reopenprops = NULL; @@ -3221,7 +3221,7 @@ qemuBlockReopenFormat(virDomainObjPtr vm, int qemuBlockReopenReadWrite(virDomainObjPtr vm, virStorageSourcePtr src, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { if (!src->readonly) return 0; @@ -3250,7 +3250,7 @@ qemuBlockReopenReadWrite(virDomainObjPtr vm, int qemuBlockReopenReadOnly(virDomainObjPtr vm, virStorageSourcePtr src, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { if (src->readonly) return 0; diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h index 35148ea2ba..55583faa93 100644 --- a/src/qemu/qemu_block.h +++ b/src/qemu/qemu_block.h @@ -47,7 +47,7 @@ qemuBlockNodeNameGetBackingChain(virJSONValuePtr namednodesdata, int qemuBlockNodeNamesDetect(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); virHashTablePtr qemuBlockGetNodeData(virJSONValuePtr data); @@ -140,7 +140,7 @@ qemuBlockStorageSourceAttachRollback(qemuMonitorPtr mon, int qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, 
virStorageSourcePtr src); struct _qemuBlockStorageSourceChainData { @@ -205,7 +205,7 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, virStorageSourcePtr backingStore, virStorageSourcePtr chain, qemuBlockStorageSourceAttachDataPtr data, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuBlockStorageSourceCreateDetectSize(virHashTablePtr blockNamedNodeData, @@ -225,7 +225,7 @@ qemuBlockNamedNodeDataGetBitmapByName(virHashTablePtr blockNamedNodeData, virHashTablePtr qemuBlockGetNamedNodeData(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuBlockGetBitmapMergeActions(virStorageSourcePtr topsrc, @@ -259,11 +259,11 @@ qemuBlockBitmapsHandleCommitFinish(virStorageSourcePtr topsrc, int qemuBlockReopenReadWrite(virDomainObjPtr vm, virStorageSourcePtr src, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuBlockReopenReadOnly(virDomainObjPtr vm, virStorageSourcePtr src, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); bool qemuBlockStorageSourceNeedsStorageSliceLayer(const virStorageSource *src); diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c index 265f449b7a..62b8e014d3 100644 --- a/src/qemu/qemu_blockjob.c +++ b/src/qemu/qemu_blockjob.c @@ -567,7 +567,7 @@ qemuBlockJobRefreshJobs(virDomainObjPtr vm) job->reconnected = true; if (job->newstate != -1) - qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); + qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE); /* 'job' may be invalid after this update */ } @@ -834,7 +834,7 @@ qemuBlockJobEventProcessLegacy(virQEMUDriverPtr driver, static void qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virStorageSourcePtr chain) { g_autoptr(qemuBlockStorageSourceChainData) data = NULL; @@ -938,7 +938,7 @@ qemuBlockJobClearConfigChain(virDomainObjPtr vm, static int qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObjPtr vm, 
qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virHashTable) blockNamedNodeData = NULL; @@ -989,7 +989,7 @@ static void qemuBlockJobProcessEventCompletedPull(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { virStorageSourcePtr baseparent = NULL; virDomainDiskDefPtr cfgdisk = NULL; @@ -1093,7 +1093,7 @@ qemuBlockJobDeleteImages(virQEMUDriverPtr driver, static int qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virHashTable) blockNamedNodeData = NULL; @@ -1156,7 +1156,7 @@ static void qemuBlockJobProcessEventCompletedCommit(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { virStorageSourcePtr baseparent = NULL; virDomainDiskDefPtr cfgdisk = NULL; @@ -1248,7 +1248,7 @@ static void qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { virStorageSourcePtr baseparent = NULL; virDomainDiskDefPtr cfgdisk = NULL; @@ -1322,7 +1322,7 @@ qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriverPtr driver, static int qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virHashTable) blockNamedNodeData = NULL; @@ -1360,7 +1360,7 @@ static void qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; 
VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name, vm->def->name); @@ -1396,7 +1396,7 @@ static void qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { VIR_DEBUG("copy job '%s' on VM '%s' aborted", job->name, vm->def->name); @@ -1416,7 +1416,7 @@ static void qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virJSONValue) actions = virJSONValueNewArray(); @@ -1452,7 +1452,7 @@ static void qemuBlockJobProcessEventConcludedCreate(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL; @@ -1495,7 +1495,7 @@ static void qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuBlockjobState newstate, unsigned long long progressCurrent, unsigned long long progressTotal) @@ -1540,7 +1540,7 @@ static void qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job, virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, unsigned long long progressCurrent, unsigned long long progressTotal) { @@ -1600,7 +1600,7 @@ static void qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job, virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuMonitorJobInfoPtr *jobinfo = NULL; size_t njobinfo = 0; @@ -1682,7 +1682,7 @@ static void qemuBlockJobEventProcess(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuBlockJobDataPtr job, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { switch 
((qemuBlockjobState) job->newstate) { diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c index e9547da555..ec811c9c63 100644 --- a/src/qemu/qemu_checkpoint.c +++ b/src/qemu/qemu_checkpoint.c @@ -162,7 +162,7 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm, actions = virJSONValueNewArray(); - if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE))) + if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE))) return -1; for (i = 0; i < chkdef->ndisks; i++) { @@ -192,7 +192,7 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm, goto relabel; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN) && - qemuBlockReopenReadWrite(vm, src, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockReopenReadWrite(vm, src, VIR_ASYNC_JOB_NONE) < 0) goto relabel; relabelimages = g_slist_prepend(relabelimages, src); @@ -208,7 +208,7 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm, virStorageSourcePtr src = next->data; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN)) - ignore_value(qemuBlockReopenReadOnly(vm, src, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuBlockReopenReadOnly(vm, src, VIR_ASYNC_JOB_NONE)); ignore_value(qemuDomainStorageSourceAccessAllow(driver, vm, src, true, false, false)); @@ -539,7 +539,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, /* Unlike snapshots, the RNG schema already ensured a sane filename. */ /* We are going to modify the domain below. 
*/ - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return NULL; if (redefine) { @@ -561,7 +561,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, checkpoint = virGetDomainCheckpoint(domain, chk->def->name); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return checkpoint; } @@ -588,13 +588,13 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE))) + if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE))) goto endjob; /* enumerate disks relevant for the checkpoint which are also present in the @@ -671,7 +671,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, /* now do a final refresh */ virHashFree(blockNamedNodeData); - if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE))) + if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE))) goto endjob; qemuDomainObjEnterMonitor(vm); @@ -697,7 +697,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -781,7 +781,7 @@ qemuCheckpointDelete(virDomainObjPtr vm, VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY | VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if (!metadata_only) { @@ -849,6 +849,6 @@ qemuCheckpointDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } diff --git a/src/qemu/qemu_domain.c 
b/src/qemu/qemu_domain.c index cc89dec3b4..420c53b82e 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -77,26 +77,26 @@ VIR_LOG_INIT("qemu.qemu_domain"); static virDomainJobType -qemuDomainJobStatusToType(qemuDomainJobStatus status) +virDomainJobStatusToType(virDomainJobStatus status) { switch (status) { - case QEMU_DOMAIN_JOB_STATUS_NONE: + case VIR_DOMAIN_JOB_STATUS_NONE: break; - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: + case VIR_DOMAIN_JOB_STATUS_ACTIVE: + case VIR_DOMAIN_JOB_STATUS_MIGRATING: + case VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_POSTCOPY: + case VIR_DOMAIN_JOB_STATUS_PAUSED: return VIR_DOMAIN_JOB_UNBOUNDED; - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_COMPLETED: return VIR_DOMAIN_JOB_COMPLETED; - case QEMU_DOMAIN_JOB_STATUS_FAILED: + case VIR_DOMAIN_JOB_STATUS_FAILED: return VIR_DOMAIN_JOB_FAILED; - case QEMU_DOMAIN_JOB_STATUS_CANCELED: + case VIR_DOMAIN_JOB_STATUS_CANCELED: return VIR_DOMAIN_JOB_CANCELLED; } @@ -151,11 +151,11 @@ int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, virDomainJobInfoPtr info) { - info->type = qemuDomainJobStatusToType(jobInfo->status); + info->type = virDomainJobStatusToType(jobInfo->status); info->timeElapsed = jobInfo->timeElapsed; switch (jobInfo->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: + case VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION: info->memTotal = jobInfo->stats.mig.ram_total; info->memRemaining = jobInfo->stats.mig.ram_remaining; info->memProcessed = jobInfo->stats.mig.ram_transferred; @@ -168,25 +168,25 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, jobInfo->mirrorStats.transferred; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + case VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: info->memTotal = jobInfo->stats.mig.ram_total; info->memRemaining = 
jobInfo->stats.mig.ram_remaining; info->memProcessed = jobInfo->stats.mig.ram_transferred; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + case VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP: info->memTotal = jobInfo->stats.dump.total; info->memProcessed = jobInfo->stats.dump.completed; info->memRemaining = info->memTotal - info->memProcessed; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + case VIR_DOMAIN_JOB_STATS_TYPE_BACKUP: info->fileTotal = jobInfo->stats.backup.total; info->fileProcessed = jobInfo->stats.backup.transferred; info->fileRemaining = info->fileTotal - info->fileProcessed; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + case VIR_DOMAIN_JOB_STATS_TYPE_NONE: break; } @@ -315,7 +315,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, /* The remaining stats are disk, mirror, or migration specific * so if this is a SAVEDUMP, we can just skip them */ - if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) + if (jobInfo->statsType == VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) goto done; if (virTypedParamsAddULLong(&par, &npar, &maxpar, @@ -364,7 +364,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, goto error; done: - *type = qemuDomainJobStatusToType(jobInfo->status); + *type = virDomainJobStatusToType(jobInfo->status); *params = par; *nparams = npar; return 0; @@ -407,7 +407,7 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo, stats->total - stats->completed) < 0) goto error; - *type = qemuDomainJobStatusToType(jobInfo->status); + *type = virDomainJobStatusToType(jobInfo->status); *params = par; *nparams = npar; return 0; @@ -459,9 +459,9 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, return -1; } - if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && + if (jobInfo->status != VIR_DOMAIN_JOB_STATUS_ACTIVE && virTypedParamListAddBoolean(par, - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, + jobInfo->status == VIR_DOMAIN_JOB_STATUS_COMPLETED, VIR_DOMAIN_JOB_SUCCESS) < 0) return 
-1; @@ -470,7 +470,7 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, return -1; *nparams = virTypedParamListStealParams(par, params); - *type = qemuDomainJobStatusToType(jobInfo->status); + *type = virDomainJobStatusToType(jobInfo->status); return 0; } @@ -482,23 +482,23 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, int *nparams) { switch (jobInfo->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + case VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION: + case VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + case VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP: return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams); - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + case VIR_DOMAIN_JOB_STATS_TYPE_BACKUP: return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + case VIR_DOMAIN_JOB_STATS_TYPE_NONE: virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("invalid job statistics type")); break; default: - virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); + virReportEnumRangeError(virDomainJobStatsType, jobInfo->statsType); break; } @@ -618,12 +618,12 @@ qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, static int qemuDomainFormatJobPrivate(virBufferPtr buf, - qemuDomainJobObjPtr job, + virDomainJobObjPtr job, virDomainObjPtr vm) { qemuDomainJobPrivatePtr priv = job->privateData; - if (job->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && + if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT && qemuDomainObjPrivateXMLFormatNBDMigration(buf, vm) < 0) return -1; @@ -634,18 +634,18 @@ qemuDomainFormatJobPrivate(virBufferPtr buf, } static void -qemuDomainCurrentJobInfoInit(qemuDomainJobObjPtr job, +qemuDomainCurrentJobInfoInit(virDomainJobObjPtr job, unsigned long long now) { qemuDomainJobPrivatePtr priv = job->privateData; priv->current = 
g_new0(qemuDomainJobInfo, 1); - priv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; priv->current->started = now; } static void -qemuDomainJobInfoSetOperation(qemuDomainJobObjPtr job, +qemuDomainJobInfoSetOperation(virDomainJobObjPtr job, virDomainJobOperation operation) { qemuDomainJobPrivatePtr priv = job->privateData; @@ -736,7 +736,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, return -1; if (n > 0) { - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { + if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) { VIR_WARN("Found disks marked for migration but we were not " "migrating"); n = 0; @@ -762,7 +762,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, static int qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt, - qemuDomainJobObjPtr job, + virDomainJobObjPtr job, virDomainObjPtr vm) { qemuDomainJobPrivatePtr priv = job->privateData; @@ -806,7 +806,7 @@ qemuDomainGetMaxQueuedJobs(virDomainObjPtr vm) return cfg->maxQueuedJobs; } -static qemuDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = { +static virDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = { .allocJobPrivate = qemuJobAllocPrivate, .freeJobPrivate = qemuJobFreePrivate, .resetJobPrivate = qemuJobResetPrivate, @@ -820,7 +820,7 @@ static qemuDomainJobPrivateJobCallbacks qemuJobPrivateJobCallbacks = { .getMaxQueuedJobs = qemuDomainGetMaxQueuedJobs, }; -static qemuDomainJobPrivateCallbacks qemuJobPrivateCallbacks = { +static virDomainJobPrivateCallbacks qemuJobPrivateCallbacks = { .saveStatus = qemuDomainSaveStatus, .jobcb = &qemuJobPrivateJobCallbacks, }; @@ -2276,7 +2276,7 @@ qemuDomainObjPrivateAlloc(void *opaque) if (VIR_ALLOC(priv) < 0) return NULL; - if (qemuDomainObjInitJob(&priv->job, &qemuJobPrivateCallbacks) < 0) { + if (virDomainObjInitJob(&priv->job, &qemuJobPrivateCallbacks) < 0) { virReportSystemError(errno, "%s", _("Unable to init qemu driver mutexes")); goto error; @@ -2387,7 +2387,7 
@@ qemuDomainObjPrivateFree(void *data) qemuDomainObjPrivateDataClear(priv); virObjectUnref(priv->monConfig); - qemuDomainObjFreeJob(&priv->job); + virDomainObjFreeJob(&priv->job); VIR_FREE(priv->lockState); VIR_FREE(priv->origname); @@ -2994,7 +2994,7 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf, if (priv->lockState) virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState); - if (qemuDomainObjPrivateXMLFormatJob(buf, vm, &priv->job) < 0) + if (virDomainObjPrivateXMLFormatJob(buf, vm, &priv->job) < 0) return -1; if (priv->fakeReboot) @@ -3653,7 +3653,7 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, priv->lockState = virXPathString("string(./lockstate)", ctxt); - if (qemuDomainObjPrivateXMLParseJob(vm, ctxt, &priv->job) < 0) + if (virDomainObjPrivateXMLParseJob(vm, ctxt, &priv->job) < 0) goto error; priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1; @@ -6083,7 +6083,7 @@ qemuDomainSaveConfig(virDomainObjPtr obj) * obj must be locked before calling * * To be called immediately before any QEMU monitor API call - * Must have already called qemuDomainObjBeginJob() and checked + * Must have already called virDomainObjBeginJob() and checked * that the VM is still active; may not be used for nested async * jobs. 
* @@ -6091,18 +6091,18 @@ qemuDomainSaveConfig(virDomainObjPtr obj) */ static int qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = obj->privateData; - if (asyncJob != QEMU_ASYNC_JOB_NONE) { + if (asyncJob != VIR_ASYNC_JOB_NONE) { int ret; - if ((ret = qemuDomainObjBeginNestedJob(obj, &priv->job, asyncJob)) < 0) + if ((ret = virDomainObjBeginNestedJob(obj, &priv->job, asyncJob)) < 0) return ret; if (!virDomainObjIsActive(obj)) { virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("domain is no longer running")); - qemuDomainObjEndJob(obj, &priv->job); + virDomainObjEndJob(obj, &priv->job); return -1; } } else if (priv->job.asyncOwner == virThreadSelfID()) { @@ -6111,7 +6111,7 @@ qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj, } else if (priv->job.owner != virThreadSelfID()) { VIR_WARN("Entering a monitor without owning a job. " "Job %s owner %s (%llu)", - qemuDomainJobTypeToString(priv->job.active), + virDomainJobTypeToString(priv->job.active), priv->job.ownerAPI, priv->job.owner); } @@ -6146,13 +6146,13 @@ qemuDomainObjExitMonitorInternal(virDomainObjPtr obj) if (!hasRefs) priv->mon = NULL; - if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjEndJob(obj, &priv->job); + if (priv->job.active == VIR_JOB_ASYNC_NESTED) + virDomainObjEndJob(obj, &priv->job); } void qemuDomainObjEnterMonitor(virDomainObjPtr obj) { - ignore_value(qemuDomainObjEnterMonitorInternal(obj, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuDomainObjEnterMonitorInternal(obj, VIR_ASYNC_JOB_NONE)); } /* obj must NOT be locked before calling @@ -6181,9 +6181,9 @@ int qemuDomainObjExitMonitor(virDomainObjPtr obj) * obj must be locked before calling * * To be called immediately before any QEMU monitor API call. 
- * Must have already either called qemuDomainObjBeginJob() + * Must have already either called virDomainObjBeginJob() * and checked that the VM is still active, with asyncJob of - * QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob, + * VIR_ASYNC_JOB_NONE; or already called virDomainObjBeginAsyncJob, * with the same asyncJob. * * Returns 0 if job was started, in which case this must be followed with @@ -6193,7 +6193,7 @@ int qemuDomainObjExitMonitor(virDomainObjPtr obj) */ int qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { return qemuDomainObjEnterMonitorInternal(obj, asyncJob); } @@ -6203,7 +6203,7 @@ qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, * obj must be locked before calling * * To be called immediately before any QEMU agent API call. - * Must have already called qemuDomainObjBeginAgentJob() and + * Must have already called virDomainObjBeginAgentJob() and * checked that the VM is still active. * * To be followed with qemuDomainObjExitAgent() once complete @@ -7282,7 +7282,7 @@ qemuDomainRemoveInactiveLocked(virQEMUDriverPtr driver, * qemuDomainRemoveInactiveJob: * * Just like qemuDomainRemoveInactive but it tries to grab a - * QEMU_JOB_MODIFY first. Even though it doesn't succeed in + * VIR_JOB_MODIFY first. Even though it doesn't succeed in * grabbing the job the control carries with * qemuDomainRemoveInactive call. 
*/ @@ -7293,12 +7293,12 @@ qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, bool haveJob; qemuDomainObjPrivatePtr priv = vm->privateData; - haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0; + haveJob = virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) >= 0; qemuDomainRemoveInactive(driver, vm); if (haveJob) - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -7315,12 +7315,12 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, bool haveJob; qemuDomainObjPrivatePtr priv = vm->privateData; - haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0; + haveJob = virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) >= 0; qemuDomainRemoveInactiveLocked(driver, vm); if (haveJob) - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -10210,7 +10210,7 @@ qemuDomainVcpuPersistOrder(virDomainDefPtr def) int qemuDomainCheckMonitor(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index 43fb37e786..137877e5fd 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -31,7 +31,7 @@ #include "qemu_monitor.h" #include "qemu_agent.h" #include "qemu_blockjob.h" -#include "qemu_domainjob.h" +#include "virdomainjob.h" #include "qemu_conf.h" #include "qemu_capabilities.h" #include "qemu_migration_params.h" @@ -133,7 +133,7 @@ typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr; struct _qemuDomainObjPrivate { virQEMUDriverPtr driver; - qemuDomainJobObj job; + virDomainJobObj job; virBitmapPtr namespaces; @@ -501,7 +501,7 @@ struct _qemuDomainBackupStats { typedef struct _qemuDomainJobInfo qemuDomainJobInfo; typedef qemuDomainJobInfo *qemuDomainJobInfoPtr; struct _qemuDomainJobInfo { - qemuDomainJobStatus status; + virDomainJobStatus status; virDomainJobOperation operation; unsigned long long started; /* When the async 
job started */ unsigned long long stopped; /* When the domain's CPUs were stopped */ @@ -518,7 +518,7 @@ struct _qemuDomainJobInfo { destination. */ bool timeDeltaSet; /* Raw values from QEMU */ - qemuDomainJobStatsType statsType; + virDomainJobStatsType statsType; union { qemuMonitorMigrationStats mig; qemuMonitorDumpStats dump; @@ -584,7 +584,7 @@ int qemuDomainObjExitMonitor(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; @@ -967,7 +967,7 @@ void qemuDomainVcpuPersistOrder(virDomainDefPtr def) ATTRIBUTE_NONNULL(1); int qemuDomainCheckMonitor(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); bool qemuDomainSupportsVideoVga(virDomainVideoDefPtr video, virQEMUCapsPtr qemuCaps); diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h deleted file mode 100644 index f7e5cfa1fd..0000000000 --- a/src/qemu/qemu_domainjob.h +++ /dev/null @@ -1,243 +0,0 @@ -/* - * qemu_domainjob.h: helper functions for QEMU domain jobs - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library. If not, see - * <http://www.gnu.org/licenses/>. - */ - -#pragma once - -#include <glib-object.h> - -#define JOB_MASK(job) (job == 0 ? 
0 : 1 << (job - 1)) -#define QEMU_JOB_DEFAULT_MASK \ - (JOB_MASK(QEMU_JOB_QUERY) | \ - JOB_MASK(QEMU_JOB_DESTROY) | \ - JOB_MASK(QEMU_JOB_ABORT)) - -/* Jobs which have to be tracked in domain state XML. */ -#define QEMU_DOMAIN_TRACK_JOBS \ - (JOB_MASK(QEMU_JOB_DESTROY) | \ - JOB_MASK(QEMU_JOB_ASYNC)) - -/* Only 1 job is allowed at any time - * A job includes *all* monitor commands, even those just querying - * information, not merely actions */ -typedef enum { - QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */ - QEMU_JOB_QUERY, /* Doesn't change any state */ - QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */ - QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */ - QEMU_JOB_MODIFY, /* May change state */ - QEMU_JOB_ABORT, /* Abort current async job */ - QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */ - - /* The following two items must always be the last items before JOB_LAST */ - QEMU_JOB_ASYNC, /* Asynchronous job */ - QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */ - - QEMU_JOB_LAST -} qemuDomainJob; -VIR_ENUM_DECL(qemuDomainJob); - -typedef enum { - QEMU_AGENT_JOB_NONE = 0, /* No agent job. */ - QEMU_AGENT_JOB_QUERY, /* Does not change state of domain */ - QEMU_AGENT_JOB_MODIFY, /* May change state of domain */ - - QEMU_AGENT_JOB_LAST -} qemuDomainAgentJob; -VIR_ENUM_DECL(qemuDomainAgentJob); - -/* Async job consists of a series of jobs that may change state. Independent - * jobs that do not change state (and possibly others if explicitly allowed by - * current async job) are allowed to be run even if async job is active. 
- */ -typedef enum { - QEMU_ASYNC_JOB_NONE = 0, - QEMU_ASYNC_JOB_MIGRATION_OUT, - QEMU_ASYNC_JOB_MIGRATION_IN, - QEMU_ASYNC_JOB_SAVE, - QEMU_ASYNC_JOB_DUMP, - QEMU_ASYNC_JOB_SNAPSHOT, - QEMU_ASYNC_JOB_START, - QEMU_ASYNC_JOB_BACKUP, - - QEMU_ASYNC_JOB_LAST -} qemuDomainAsyncJob; -VIR_ENUM_DECL(qemuDomainAsyncJob); - -typedef enum { - QEMU_DOMAIN_JOB_STATUS_NONE = 0, - QEMU_DOMAIN_JOB_STATUS_ACTIVE, - QEMU_DOMAIN_JOB_STATUS_MIGRATING, - QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED, - QEMU_DOMAIN_JOB_STATUS_PAUSED, - QEMU_DOMAIN_JOB_STATUS_POSTCOPY, - QEMU_DOMAIN_JOB_STATUS_COMPLETED, - QEMU_DOMAIN_JOB_STATUS_FAILED, - QEMU_DOMAIN_JOB_STATUS_CANCELED, -} qemuDomainJobStatus; - -typedef enum { - QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0, - QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION, - QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP, - QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP, - QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP, -} qemuDomainJobStatsType; - -typedef struct _qemuDomainJobObj qemuDomainJobObj; -typedef qemuDomainJobObj *qemuDomainJobObjPtr; - -typedef void *(*qemuDomainObjPrivateJobAlloc)(void); -typedef void (*qemuDomainObjPrivateJobFree)(void *); -typedef void (*qemuDomainObjPrivateJobReset)(void *); -typedef void (*qemuDomainObjPrivateSaveStatus)(virDomainObjPtr); -typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, - qemuDomainJobObjPtr, - virDomainObjPtr); -typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr, qemuDomainJobObjPtr, - virDomainObjPtr); -typedef void (*qemuDomainObjJobInfoSetOperation)(qemuDomainJobObjPtr, - virDomainJobOperation); -typedef void (*qemuDomainObjCurrentJobInfoInit)(qemuDomainJobObjPtr, - unsigned long long); -typedef int (*qemuDomainObjGetJobsQueued)(virDomainObjPtr); -typedef void (*qemuDomainObjIncreaseJobsQueued)(virDomainObjPtr); -typedef void (*qemuDomainObjDecreaseJobsQueued)(virDomainObjPtr); -typedef int (*qemuDomainObjGetMaxQueuedJobs)(virDomainObjPtr); - -typedef struct _qemuDomainJobPrivateJobCallbacks qemuDomainJobPrivateJobCallbacks; -typedef 
qemuDomainJobPrivateJobCallbacks *qemuDomainJobPrivateJobCallbacksPtr; -struct _qemuDomainJobPrivateJobCallbacks { - qemuDomainObjPrivateJobAlloc allocJobPrivate; - qemuDomainObjPrivateJobFree freeJobPrivate; - qemuDomainObjPrivateJobReset resetJobPrivate; - qemuDomainObjPrivateJobFormat formatJob; - qemuDomainObjPrivateJobParse parseJob; - qemuDomainObjJobInfoSetOperation setJobInfoOperation; - qemuDomainObjCurrentJobInfoInit currentJobInfoInit; - qemuDomainObjGetJobsQueued getJobsQueued; - qemuDomainObjIncreaseJobsQueued increaseJobsQueued; - qemuDomainObjDecreaseJobsQueued decreaseJobsQueued; - qemuDomainObjGetMaxQueuedJobs getMaxQueuedJobs; -}; - -typedef struct _qemuDomainJobPrivateCallbacks qemuDomainJobPrivateCallbacks; -typedef qemuDomainJobPrivateCallbacks *qemuDomainJobPrivateCallbacksPtr; -struct _qemuDomainJobPrivateCallbacks { - /* generic callbacks that we can't really categorize */ - qemuDomainObjPrivateSaveStatus saveStatus; - - /* Job related callbacks */ - qemuDomainJobPrivateJobCallbacksPtr jobcb; -}; - -struct _qemuDomainJobObj { - virCond cond; /* Use to coordinate jobs */ - - /* The following members are for QEMU_JOB_* */ - qemuDomainJob active; /* Currently running job */ - unsigned long long owner; /* Thread id which set current job */ - const char *ownerAPI; /* The API which owns the job */ - unsigned long long started; /* When the current job started */ - - /* The following members are for QEMU_AGENT_JOB_* */ - qemuDomainAgentJob agentActive; /* Currently running agent job */ - unsigned long long agentOwner; /* Thread id which set current agent job */ - const char *agentOwnerAPI; /* The API which owns the agent job */ - unsigned long long agentStarted; /* When the current agent job started */ - - /* The following members are for QEMU_ASYNC_JOB_* */ - virCond asyncCond; /* Use to coordinate with async jobs */ - qemuDomainAsyncJob asyncJob; /* Currently active async job */ - unsigned long long asyncOwner; /* Thread which set current async 
job */ - const char *asyncOwnerAPI; /* The API which owns the async job */ - unsigned long long asyncStarted; /* When the current async job started */ - int phase; /* Job phase (mainly for migrations) */ - unsigned long long mask; /* Jobs allowed during async job */ - bool abortJob; /* abort of the job requested */ - char *error; /* job event completion error */ - unsigned long apiFlags; /* flags passed to the API which started the async job */ - - void *privateData; /* job specific collection of data */ - qemuDomainJobPrivateCallbacksPtr cb; -}; - -const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, - int phase); -int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, - const char *phase); - -int qemuDomainObjBeginJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainJob job) - G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainAgentJob agentJob) - G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainAsyncJob asyncJob, - virDomainJobOperation operation, - unsigned long apiFlags) - G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainAsyncJob asyncJob) - G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj, - qemuDomainJob job) - G_GNUC_WARN_UNUSED_RESULT; - -void qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj); -void qemuDomainObjEndAgentJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj); -void qemuDomainObjEndAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr jobObj); -void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job); -void qemuDomainObjSetJobPhase(virDomainObjPtr obj, - qemuDomainJobObjPtr job, - int phase); -void qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job, - unsigned long long allowedJobs); -int 
qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, - qemuDomainJobObjPtr oldJob); -void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job); -void qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job); - -bool qemuDomainTrackJob(qemuDomainJob job); - -void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); - -int -qemuDomainObjInitJob(qemuDomainJobObjPtr job, - qemuDomainJobPrivateCallbacksPtr cb); - -bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob); - -int -qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm, - qemuDomainJobObjPtr jobObj); - -int -qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - xmlXPathContextPtr ctxt, - qemuDomainJobObjPtr job); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index c0b986cddf..ca84f9d9f9 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -155,7 +155,7 @@ static int qemuDomainObjStart(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, unsigned int flags, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); static int qemuDomainManagedSaveLoad(virDomainObjPtr vm, void *opaque); @@ -199,7 +199,7 @@ qemuAutostartDomain(virDomainObjPtr vm, } if (qemuDomainObjStart(NULL, driver, vm, flags, - QEMU_ASYNC_JOB_START) < 0) { + VIR_ASYNC_JOB_START) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Failed to autostart VM '%s': %s"), vm->def->name, virGetLastErrorMessage()); @@ -1753,7 +1753,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, goto cleanup; } - if (qemuProcessStart(conn, driver, vm, NULL, QEMU_ASYNC_JOB_START, + if (qemuProcessStart(conn, driver, vm, NULL, VIR_ASYNC_JOB_START, NULL, -1, NULL, NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, start_flags) < 0) { @@ -1811,15 +1811,15 @@ static int qemuDomainSuspend(virDomainPtr dom) cfg = virQEMUDriverGetConfig(driver); priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_SUSPEND) < 0) + if (virDomainObjBeginJob(vm, &priv->job, 
VIR_JOB_SUSPEND) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) reason = VIR_DOMAIN_PAUSED_MIGRATION; - else if (priv->job.asyncJob == QEMU_ASYNC_JOB_SNAPSHOT) + else if (priv->job.asyncJob == VIR_ASYNC_JOB_SNAPSHOT) reason = VIR_DOMAIN_PAUSED_SNAPSHOT; else reason = VIR_DOMAIN_PAUSED_USER; @@ -1830,7 +1830,7 @@ static int qemuDomainSuspend(virDomainPtr dom) "%s", _("domain is pmsuspended")); goto endjob; } else if (state != VIR_DOMAIN_PAUSED) { - if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessStopCPUs(driver, vm, reason, VIR_ASYNC_JOB_NONE) < 0) goto endjob; } if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) @@ -1838,7 +1838,7 @@ static int qemuDomainSuspend(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -1866,7 +1866,7 @@ static int qemuDomainResume(virDomainPtr dom) if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1886,7 +1886,7 @@ static int qemuDomainResume(virDomainPtr dom) state == VIR_DOMAIN_PAUSED) { if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_UNPAUSED, - QEMU_ASYNC_JOB_NONE) < 0) { + VIR_ASYNC_JOB_NONE) < 0) { if (virGetLastErrorCode() == VIR_ERR_OK) virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("resume operation failed")); @@ -1898,7 +1898,7 @@ static int qemuDomainResume(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -1918,7 +1918,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, int agentFlag = isReboot ? 
QEMU_AGENT_SHUTDOWN_REBOOT : QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1936,7 +1936,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -1951,7 +1951,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1967,7 +1967,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -2051,8 +2051,8 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, if (!isReboot) agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) return -1; if (!qemuDomainAgentAvailable(vm, agentForced)) @@ -2067,7 +2067,7 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -2080,7 +2080,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -2093,7 +2093,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + 
virDomainObjEndJob(vm, &priv->job); return ret; } @@ -2171,7 +2171,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) if (virDomainResetEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2189,7 +2189,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_CRASHED); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2229,7 +2229,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, reason == VIR_DOMAIN_PAUSED_STARTING_UP && !priv->beingDestroyed); - if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, + if (qemuProcessBeginStopJob(vm, VIR_JOB_DESTROY, !(flags & VIR_DOMAIN_DESTROY_GRACEFUL)) < 0) goto cleanup; @@ -2246,11 +2246,11 @@ qemuDomainDestroyFlags(virDomainPtr dom, qemuDomainSetFakeReboot(driver, vm, false); - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED, - QEMU_ASYNC_JOB_NONE, stopFlags); + VIR_ASYNC_JOB_NONE, stopFlags); event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_DESTROYED); @@ -2260,7 +2260,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, endjob: if (ret == 0) qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2335,7 +2335,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if 
(virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2421,7 +2421,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2461,7 +2461,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2507,7 +2507,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2530,7 +2530,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2542,7 +2542,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2589,7 +2589,7 @@ static int qemuDomainSendKey(virDomainPtr domain, if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2601,7 +2601,7 @@ static int qemuDomainSendKey(virDomainPtr domain, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2771,7 +2771,7 @@ 
qemuDomainSaveInternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SAVE, + if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_SAVE, VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0) goto cleanup; @@ -2781,13 +2781,13 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, goto endjob; } - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Pause */ if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { was_running = true; if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE, - QEMU_ASYNC_JOB_SAVE) < 0) + VIR_ASYNC_JOB_SAVE) < 0) goto endjob; if (!virDomainObjIsActive(vm)) { @@ -2838,13 +2838,13 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, xml = NULL; ret = qemuSaveImageCreate(driver, vm, path, data, compressor, - flags, QEMU_ASYNC_JOB_SAVE); + flags, VIR_ASYNC_JOB_SAVE); if (ret < 0) goto endjob; /* Shut it down */ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_SAVED, - QEMU_ASYNC_JOB_SAVE, 0); + VIR_ASYNC_JOB_SAVE, 0); virDomainAuditStop(vm, "saved"); event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_SAVED); @@ -2855,7 +2855,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virErrorPreserveLast(&save_err); if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_SAVE_CANCELED, - QEMU_ASYNC_JOB_SAVE) < 0) { + VIR_ASYNC_JOB_SAVE) < 0) { VIR_WARN("Unable to resume guest CPUs after save failure"); virObjectEventStateQueue(driver->domainEventState, virDomainEventLifecycleNewFromObj(vm, @@ -2865,7 +2865,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virErrorRestore(&save_err); } } - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); if (ret == 0) qemuDomainRemoveInactiveJob(driver, vm); @@ -3106,7 +3106,7 @@ static int qemuDumpToFd(virQEMUDriverPtr driver, virDomainObjPtr 
vm, int fd, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, const char *dumpformat) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3126,7 +3126,7 @@ qemuDumpToFd(virQEMUDriverPtr driver, return -1; if (detach) - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP; else g_clear_pointer(&jobPriv->current, qemuDomainJobInfoFree); @@ -3215,7 +3215,7 @@ doCoreDump(virQEMUDriverPtr driver, if (STREQ(memory_dump_format, "elf")) memory_dump_format = NULL; - rc = qemuDumpToFd(driver, vm, fd, QEMU_ASYNC_JOB_DUMP, + rc = qemuDumpToFd(driver, vm, fd, VIR_ASYNC_JOB_DUMP, memory_dump_format); } else { if (dumpformat != VIR_DOMAIN_CORE_DUMP_FORMAT_RAW) { @@ -3229,7 +3229,7 @@ doCoreDump(virQEMUDriverPtr driver, goto cleanup; rc = qemuMigrationSrcToFile(driver, vm, fd, compressor, - QEMU_ASYNC_JOB_DUMP); + VIR_ASYNC_JOB_DUMP); } if (rc < 0) @@ -3283,7 +3283,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (virDomainCoreDumpWithFormatEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, + if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) goto cleanup; @@ -3293,7 +3293,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, priv = vm->privateData; jobPriv = priv->job.privateData; - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Migrate will always stop the VM, so the resume condition is independent of whether the stop command is issued. 
*/ @@ -3303,7 +3303,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (!(flags & VIR_DUMP_LIVE) && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP, - QEMU_ASYNC_JOB_DUMP) < 0) + VIR_ASYNC_JOB_DUMP) < 0) goto endjob; paused = true; @@ -3322,7 +3322,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, endjob: if ((ret == 0) && (flags & VIR_DUMP_CRASH)) { qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED, - QEMU_ASYNC_JOB_DUMP, 0); + VIR_ASYNC_JOB_DUMP, 0); virDomainAuditStop(vm, "crashed"); event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, @@ -3339,7 +3339,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (resume && virDomainObjIsActive(vm)) { if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_UNPAUSED, - QEMU_ASYNC_JOB_DUMP) < 0) { + VIR_ASYNC_JOB_DUMP) < 0) { event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR); @@ -3350,7 +3350,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } } - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); if (ret == 0 && flags & VIR_DUMP_CRASH) qemuDomainRemoveInactiveJob(driver, vm); @@ -3400,7 +3400,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -3474,7 +3474,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (unlink_tmp) unlink(tmp); - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -3522,7 +3522,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, switch (action) { case VIR_DOMAIN_WATCHDOG_ACTION_DUMP: - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, + if (virDomainObjBeginAsyncJob(vm, &priv->job, 
VIR_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) { return; @@ -3539,7 +3539,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, ret = qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_UNPAUSED, - QEMU_ASYNC_JOB_DUMP); + VIR_ASYNC_JOB_DUMP); if (ret < 0) virReportError(VIR_ERR_OPERATION_FAILED, @@ -3550,7 +3550,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); } static int @@ -3599,7 +3599,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, bool removeInactive = false; unsigned long flags = VIR_DUMP_MEMORY_ONLY; - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, + if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) return; @@ -3637,7 +3637,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY: qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_CRASHED, - QEMU_ASYNC_JOB_DUMP, 0); + VIR_ASYNC_JOB_DUMP, 0); event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_CRASHED); @@ -3665,7 +3665,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); if (removeInactive) qemuDomainRemoveInactiveJob(driver, vm); } @@ -3683,7 +3683,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, VIR_DEBUG("Removing device %s from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -3706,7 +3706,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, devAlias); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -3921,7 +3921,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm, "from domain %p %s", devAlias, vm, vm->def->name); - 
if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4003,7 +4003,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virNetDevRxFilterFree(hostFilter); @@ -4049,7 +4049,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, memset(&dev, 0, sizeof(dev)); } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4090,7 +4090,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -4104,7 +4104,7 @@ processBlockJobEvent(virDomainObjPtr vm, g_autoptr(qemuBlockJobData) job = NULL; qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4126,10 +4126,10 @@ processBlockJobEvent(virDomainObjPtr vm, job->newstate = status; - qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); + qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -4139,7 +4139,7 @@ processJobStatusChangeEvent(virDomainObjPtr vm, { qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4147,10 +4147,10 @@ processJobStatusChangeEvent(virDomainObjPtr vm, goto endjob; } - qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); + qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -4165,7 
+4165,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, unsigned int stopFlags = 0; virObjectEventPtr event = NULL; - if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, true) < 0) + if (qemuProcessBeginStopJob(vm, VIR_JOB_DESTROY, true) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4182,7 +4182,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, auditReason = "failed"; } - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) { + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) { stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; qemuMigrationDstErrorSave(driver, vm->def->name, qemuMonitorLastError(priv->mon)); @@ -4190,13 +4190,13 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, eventReason); - qemuProcessStop(driver, vm, stopReason, QEMU_ASYNC_JOB_NONE, stopFlags); + qemuProcessStop(driver, vm, stopReason, VIR_ASYNC_JOB_NONE, stopFlags); virDomainAuditStop(vm, auditReason); virObjectEventStateQueue(driver->domainEventState, event); endjob: qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } @@ -4453,11 +4453,11 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (useAgent) { - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; } else { - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; } @@ -4474,9 +4474,9 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, endjob: if (useAgent) - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); else - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -4602,7 +4602,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if 
(qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -4641,7 +4641,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -4732,7 +4732,7 @@ qemuDomainPinEmulator(virDomainPtr dom, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -4798,7 +4798,7 @@ qemuDomainPinEmulator(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: if (cgroup_emulator) @@ -4919,8 +4919,8 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) goto cleanup; if (flags & VIR_DOMAIN_VCPU_GUEST) { - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4938,7 +4938,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); if (ncpuinfo < 0) goto cleanup; @@ -5003,7 +5003,7 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -5052,7 +5052,7 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm, ret = niothreads; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: if (info_ret) { @@ -5190,7 +5190,7 @@ qemuDomainPinIOThread(virDomainPtr dom, if 
(virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5279,7 +5279,7 @@ qemuDomainPinIOThread(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: if (cgroup_iothread) @@ -5640,7 +5640,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5727,7 +5727,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -6076,7 +6076,7 @@ qemuDomainRestoreFlags(virConnectPtr conn, goto cleanup; ret = qemuSaveImageStartVM(conn, driver, vm, &fd, data, path, - false, QEMU_ASYNC_JOB_START); + false, VIR_ASYNC_JOB_START); qemuProcessEndJob(vm); @@ -6290,7 +6290,7 @@ qemuDomainObjRestore(virConnectPtr conn, const char *path, bool start_paused, bool bypass_cache, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { virDomainDefPtr def = NULL; qemuDomainObjPrivatePtr priv = vm->privateData; @@ -6500,7 +6500,7 @@ qemuDomainObjStart(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, unsigned int flags, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { int ret = -1; g_autofree char *managed_save = NULL; @@ -6611,7 +6611,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags) } if (qemuDomainObjStart(dom->conn, driver, vm, flags, - QEMU_ASYNC_JOB_START) < 0) + VIR_ASYNC_JOB_START) < 0) goto endjob; dom->id = vm->def->id; @@ -6751,7 +6751,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, if 
(virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (!vm->persistent) { @@ -6847,7 +6847,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -7012,7 +7012,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, } if (ret == 0) - ret = qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE); + ret = qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE); return ret; } @@ -7827,7 +7827,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -7839,7 +7839,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -7884,7 +7884,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -7953,7 +7953,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainDefFree(vmdef); @@ -8023,7 +8023,7 @@ qemuDomainDetachDeviceLiveAndConfig(virQEMUDriverPtr driver, if ((rc = qemuDomainDetachDeviceLive(vm, dev_copy, driver, false)) < 0) goto cleanup; - if (rc == 0 && qemuDomainUpdateDeviceList(vm, 
QEMU_ASYNC_JOB_NONE) < 0) + if (rc == 0 && qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE) < 0) goto cleanup; /* @@ -8106,7 +8106,7 @@ qemuDomainDetachDeviceAliasLiveAndConfig(virQEMUDriverPtr driver, if ((rc = qemuDomainDetachDeviceLive(vm, &dev, driver, true)) < 0) goto cleanup; - if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (rc == 0 && qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE) < 0) goto cleanup; } @@ -8142,7 +8142,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8154,7 +8154,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8180,7 +8180,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8192,7 +8192,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8255,7 +8255,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, autostart = (autostart != 0); if (vm->autostart != autostart) { - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name))) @@ -8293,7 +8293,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, vm->autostart = 
autostart; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } ret = 0; @@ -8401,7 +8401,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -8435,7 +8435,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8577,7 +8577,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; /* QEMU and LXC implementation are identical */ @@ -8608,7 +8608,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8831,7 +8831,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, } } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -8886,7 +8886,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virBitmapFree(nodeset); @@ -9040,7 +9040,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9082,7 +9082,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, 
&priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9116,7 +9116,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (!(def = virDomainObjGetOneDef(vm, flags))) @@ -9143,7 +9143,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9317,7 +9317,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9551,7 +9551,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainDefFree(persistentDefCopy); @@ -9845,7 +9845,7 @@ qemuDomainBlockResize(virDomainPtr dom, if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -9890,7 +9890,7 @@ qemuDomainBlockResize(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10045,7 +10045,7 @@ qemuDomainBlockStats(virDomainPtr dom, if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10068,7 +10068,7 @@ 
qemuDomainBlockStats(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10105,7 +10105,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10158,7 +10158,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, *nparams = nstats; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(blockstats); @@ -10252,7 +10252,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10426,7 +10426,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virNetDevBandwidthFree(bandwidth); @@ -10544,7 +10544,7 @@ qemuDomainGetInterfaceParameters(virDomainPtr dom, return ret; } -/* This functions assumes that job QEMU_JOB_QUERY is started by a caller */ +/* This functions assumes that job VIR_JOB_QUERY is started by a caller */ static int qemuDomainMemoryStatsInternal(virDomainObjPtr vm, virDomainMemoryStatPtr stats, @@ -10602,12 +10602,12 @@ qemuDomainMemoryStats(virDomainPtr dom, if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; ret = qemuDomainMemoryStatsInternal(vm, stats, nr_stats); - qemuDomainObjEndJob(vm, &priv->job); + 
virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10708,7 +10708,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10752,7 +10752,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: VIR_FORCE_CLOSE(fd); @@ -10989,7 +10989,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (!(disk = virDomainDiskByName(vm->def, path, false))) { @@ -11061,7 +11061,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(entry); virDomainObjEndAPI(&vm); @@ -12548,19 +12548,19 @@ qemuDomainGetJobInfoMigrationStats(virDomainObjPtr vm, qemuDomainObjPrivatePtr priv = vm->privateData; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); - if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + if (jobInfo->status == VIR_DOMAIN_JOB_STATUS_ACTIVE || + jobInfo->status == VIR_DOMAIN_JOB_STATUS_MIGRATING || + jobInfo->status == VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED || + jobInfo->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) { if (events && - jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && - qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_NONE, + jobInfo->status != VIR_DOMAIN_JOB_STATUS_ACTIVE && + qemuMigrationAnyFetchStats(vm, VIR_ASYNC_JOB_NONE, jobInfo, NULL) < 0) return -1; - if 
(jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && - jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION && - qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_NONE, + if (jobInfo->status == VIR_DOMAIN_JOB_STATUS_ACTIVE && + jobInfo->statsType == VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION && + qemuMigrationSrcFetchMirrorStats(vm, VIR_ASYNC_JOB_NONE, jobInfo) < 0) return -1; @@ -12580,7 +12580,7 @@ qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm, qemuMonitorDumpStats stats = { 0 }; int rc; - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_NONE) < 0) return -1; rc = qemuMonitorQueryDump(priv->mon, &stats); @@ -12604,7 +12604,7 @@ qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm, break; case QEMU_MONITOR_DUMP_STATUS_ACTIVE: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; VIR_DEBUG("dump active, bytes written='%llu' remaining='%llu'", jobInfo->stats.dump.completed, jobInfo->stats.dump.total - @@ -12612,7 +12612,7 @@ qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm, break; case QEMU_MONITOR_DUMP_STATUS_COMPLETED: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; VIR_DEBUG("dump completed, bytes written='%llu'", jobInfo->stats.dump.completed); break; @@ -12640,14 +12640,14 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm, return 0; } - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) { + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("migration statistics are available only on " "the source host")); return -1; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -12660,30 +12660,30 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm, *jobInfo = qemuDomainJobInfoCopy(jobPriv->current); switch 
((*jobInfo)->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + case VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION: + case VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: if (qemuDomainGetJobInfoMigrationStats(vm, *jobInfo) < 0) goto cleanup; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + case VIR_DOMAIN_JOB_STATS_TYPE_MEMDUMP: if (qemuDomainGetJobInfoDumpStats(vm, *jobInfo) < 0) goto cleanup; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + case VIR_DOMAIN_JOB_STATS_TYPE_BACKUP: if (qemuBackupGetJobInfoStats(vm, *jobInfo) < 0) goto cleanup; break; - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + case VIR_DOMAIN_JOB_STATS_TYPE_NONE: break; } ret = 0; cleanup: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -12708,7 +12708,7 @@ qemuDomainGetJobInfo(virDomainPtr dom, goto cleanup; if (!jobInfo || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) { + jobInfo->status == VIR_DOMAIN_JOB_STATUS_NONE) { ret = 0; goto cleanup; } @@ -12750,7 +12750,7 @@ qemuDomainGetJobStats(virDomainPtr dom, goto cleanup; if (!jobInfo || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) { + jobInfo->status == VIR_DOMAIN_JOB_STATUS_NONE) { *type = VIR_DOMAIN_JOB_NONE; *params = NULL; *nparams = 0; @@ -12777,7 +12777,7 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) VIR_DEBUG("Cancelling migration job at client request"); - qemuDomainObjAbortAsyncJob(vm, &priv->job); + virDomainObjAbortAsyncJob(vm, &priv->job); qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateCancel(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) @@ -12803,7 +12803,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_ABORT) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_ABORT) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -12812,25 +12812,25 @@ static int qemuDomainAbortJob(virDomainPtr dom) 
jobPriv = priv->job.privateData; switch (priv->job.asyncJob) { - case QEMU_ASYNC_JOB_NONE: + case VIR_ASYNC_JOB_NONE: virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("no job is active on the domain")); break; - case QEMU_ASYNC_JOB_MIGRATION_IN: + case VIR_ASYNC_JOB_MIGRATION_IN: virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("cannot abort incoming migration;" " use virDomainDestroy instead")); break; - case QEMU_ASYNC_JOB_START: + case VIR_ASYNC_JOB_START: virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("cannot abort VM start;" " use virDomainDestroy instead")); break; - case QEMU_ASYNC_JOB_MIGRATION_OUT: - if ((jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || + case VIR_ASYNC_JOB_MIGRATION_OUT: + if ((jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY || (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY))) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", @@ -12841,11 +12841,11 @@ static int qemuDomainAbortJob(virDomainPtr dom) ret = qemuDomainAbortJobMigration(vm); break; - case QEMU_ASYNC_JOB_SAVE: + case VIR_ASYNC_JOB_SAVE: ret = qemuDomainAbortJobMigration(vm); break; - case QEMU_ASYNC_JOB_DUMP: + case VIR_ASYNC_JOB_DUMP: if (priv->job.apiFlags & VIR_DUMP_MEMORY_ONLY) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("cannot abort memory-only dump")); @@ -12855,23 +12855,23 @@ static int qemuDomainAbortJob(virDomainPtr dom) ret = qemuDomainAbortJobMigration(vm); break; - case QEMU_ASYNC_JOB_SNAPSHOT: + case VIR_ASYNC_JOB_SNAPSHOT: ret = qemuDomainAbortJobMigration(vm); break; - case QEMU_ASYNC_JOB_BACKUP: - qemuBackupJobCancelBlockjobs(vm, priv->backup, true, QEMU_ASYNC_JOB_NONE); + case VIR_ASYNC_JOB_BACKUP: + qemuBackupJobCancelBlockjobs(vm, priv->backup, true, VIR_ASYNC_JOB_NONE); ret = 0; break; - case QEMU_ASYNC_JOB_LAST: + case VIR_ASYNC_JOB_LAST: default: - virReportEnumRangeError(qemuDomainAsyncJob, priv->job.asyncJob); + virReportEnumRangeError(virDomainAsyncJob, 
priv->job.asyncJob); break; } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -12900,7 +12900,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -12917,7 +12917,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, downtime) < 0) goto endjob; - if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { @@ -12930,7 +12930,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -12959,13 +12959,13 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, VIR_ASYNC_JOB_NONE, &migParams) < 0) goto endjob; @@ -12985,7 +12985,7 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: qemuMigrationParamsFree(migParams); @@ -13015,7 +13015,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 
0) @@ -13029,7 +13029,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, } if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_XBZRLE_CACHE_SIZE)) { - if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, VIR_ASYNC_JOB_NONE, &migParams) < 0) goto endjob; @@ -13047,7 +13047,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13075,7 +13075,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13098,7 +13098,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, cacheSize) < 0) goto endjob; - if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { @@ -13111,7 +13111,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13158,7 +13158,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13183,7 +13183,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, bandwidth * 1024 * 1024) < 0) goto endjob; - if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { @@ -13201,7 +13201,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, 
&priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13219,13 +13219,13 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) goto cleanup; - if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, VIR_ASYNC_JOB_NONE, &migParams) < 0) goto cleanup; @@ -13256,7 +13256,7 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, ret = 0; cleanup: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -13314,13 +13314,13 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { + if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("post-copy can only be started while " "outgoing migration is in progress")); @@ -13341,7 +13341,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14040,7 +14040,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, if (virDomainQemuMonitorCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14056,7 +14056,7 @@ static int 
qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14374,7 +14374,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, goto cleanup; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14470,7 +14470,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, qemuBlockJobStarted(job, vm); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: qemuBlockJobStartupFinalize(vm, job); @@ -14504,7 +14504,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14558,13 +14558,13 @@ qemuDomainBlockJobAbort(virDomainPtr dom, ignore_value(virDomainObjSave(vm, driver->xmlopt, cfg->stateDir)); if (!async) { - qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); + qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE); while (qemuBlockJobIsRunning(job)) { if (virDomainObjWait(vm) < 0) { ret = -1; goto endjob; } - qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); + qemuBlockJobUpdate(vm, job, VIR_ASYNC_JOB_NONE); } if (pivot && @@ -14586,8 +14586,8 @@ qemuDomainBlockJobAbort(virDomainPtr dom, endjob: if (job && !async) - qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE); - qemuDomainObjEndJob(vm, &priv->job); + qemuBlockJobSyncEnd(vm, job, VIR_ASYNC_JOB_NONE); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14666,7 +14666,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, if (virDomainGetBlockJobInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) 
+ if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14694,7 +14694,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14736,7 +14736,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14759,7 +14759,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14938,7 +14938,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -15115,7 +15115,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, goto endjob; } } else { - if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE))) + if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_NONE))) goto endjob; if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData, @@ -15157,7 +15157,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (crdata && qemuBlockStorageSourceCreate(vm, mirror, mirrorBacking, mirror->backingStore, - crdata->srcdata[0], QEMU_ASYNC_JOB_NONE) < 0) + crdata->srcdata[0], VIR_ASYNC_JOB_NONE) < 0) goto endjob; } @@ -15214,7 +15214,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (need_unlink && virStorageFileUnlink(mirror) < 0) VIR_WARN("%s", _("unable to remove just-created copy target")); virStorageFileDeinit(mirror); - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); 
qemuBlockJobStartupFinalize(vm, job); return ret; @@ -15438,7 +15438,7 @@ qemuDomainBlockCommit(virDomainPtr dom, if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -15654,7 +15654,7 @@ qemuDomainBlockCommit(virDomainPtr dom, virErrorRestore(&orig_err); } qemuBlockJobStartupFinalize(vm, job); - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -15683,7 +15683,7 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -15726,7 +15726,7 @@ qemuDomainOpenGraphics(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -15794,14 +15794,14 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom, if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH)); if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); if (ret < 0) goto cleanup; @@ -16041,7 +16041,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, cfg = virQEMUDriverGetConfig(driver); priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; 
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -16305,7 +16305,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(info.group_name); @@ -16349,7 +16349,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; /* the API check guarantees that only one of the definitions will be set */ @@ -16462,7 +16462,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(reply.group_name); @@ -16495,7 +16495,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16536,7 +16536,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, ret = n; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16574,7 +16574,7 @@ qemuDomainSetMetadata(virDomainPtr dom, if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; ret = virDomainObjSetMetadata(vm, type, metadata, key, uri, @@ -16587,7 +16587,7 @@ qemuDomainSetMetadata(virDomainPtr dom, virObjectEventStateQueue(driver->domainEventState, ev); } - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16696,7 +16696,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, if 
(!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE)) return -1; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -16705,7 +16705,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, ret = qemuDomainProbeQMPCurrentMachine(vm, wakeupSupported); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -16718,8 +16718,8 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -16733,7 +16733,7 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -16828,7 +16828,7 @@ qemuDomainPMWakeup(virDomainPtr dom, if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16840,7 +16840,7 @@ qemuDomainPMWakeup(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16886,8 +16886,8 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -16905,7 +16905,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, 
VIR_FREE(result); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16984,8 +16984,8 @@ qemuDomainFSTrim(virDomainPtr dom, if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -16999,7 +16999,7 @@ qemuDomainFSTrim(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17156,8 +17156,8 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17172,7 +17172,7 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -17189,7 +17189,7 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm, int ret = -1; qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17231,7 +17231,7 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -17302,8 +17302,8 @@ qemuDomainGetTime(virDomainPtr dom, if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_QUERY) < 
0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17322,7 +17322,7 @@ qemuDomainGetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17340,8 +17340,8 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -17355,7 +17355,7 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -17397,7 +17397,7 @@ qemuDomainSetTime(virDomainPtr dom, if (qemuDomainSetTimeAgent(vm, seconds, nseconds, rtcSync) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17417,7 +17417,7 @@ qemuDomainSetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17445,8 +17445,8 @@ qemuDomainFSFreeze(virDomainPtr dom, if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17455,7 +17455,7 @@ qemuDomainFSFreeze(virDomainPtr dom, ret = qemuSnapshotFSFreeze(vm, mountpoints, nmountpoints); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17489,8 +17489,8 @@ qemuDomainFSThaw(virDomainPtr dom, if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 
0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17499,7 +17499,7 @@ qemuDomainFSThaw(virDomainPtr dom, ret = qemuSnapshotFSThaw(vm, true); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -18002,7 +18002,7 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver G_GNUC_UNUSED, goto cleanup; if (HAVE_JOB(privflags) && virDomainObjIsActive(dom) && - qemuDomainRefreshVcpuHalted(dom, QEMU_ASYNC_JOB_NONE) < 0) { + qemuDomainRefreshVcpuHalted(dom, VIR_ASYNC_JOB_NONE) < 0) { /* it's ok to be silent and go ahead, because halted vcpu info * wasn't here from the beginning */ virResetLastError(); @@ -18743,9 +18743,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, int rv; if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT) - rv = qemuDomainObjBeginJobNowait(vm, &priv->job, QEMU_JOB_QUERY); + rv = virDomainObjBeginJobNowait(vm, &priv->job, VIR_JOB_QUERY); else - rv = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY); + rv = virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY); if (rv == 0) domflags |= QEMU_DOMAIN_STATS_HAVE_JOB; @@ -18756,7 +18756,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, domflags |= QEMU_DOMAIN_STATS_BACKING; if (qemuDomainGetStats(conn, vm, stats, &tmp, domflags) < 0) { if (HAVE_JOB(domflags) && vm) - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); virObjectUnlock(vm); goto cleanup; @@ -18766,7 +18766,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, tmpstats[nstats++] = tmp; if (HAVE_JOB(domflags)) - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); virObjectUnlock(vm); } @@ -18814,8 +18814,8 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm, qemuAgentPtr agent; qemuDomainObjPrivatePtr priv = vm->privateData; - if 
(qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_QUERY) < 0) return ret; if (virDomainObjCheckActive(vm) < 0) @@ -18829,7 +18829,7 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -18928,7 +18928,7 @@ qemuDomainGetFSInfo(virDomainPtr dom, if ((nfs = qemuDomainGetFSInfoAgent(vm, &agentinfo)) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -18937,7 +18937,7 @@ qemuDomainGetFSInfo(virDomainPtr dom, ret = virDomainFSInfoFormat(agentinfo, nfs, vm->def, info); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: g_free(agentinfo); @@ -18976,8 +18976,8 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, break; case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT: - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -18988,7 +18988,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); break; @@ -19031,8 +19031,8 @@ qemuDomainSetUserPassword(virDomainPtr dom, if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19052,7 +19052,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + 
virDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19187,7 +19187,7 @@ static int qemuDomainRename(virDomainPtr dom, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjIsActive(vm)) { @@ -19234,7 +19234,7 @@ static int qemuDomainRename(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19327,8 +19327,8 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, - QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, + VIR_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19347,7 +19347,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: VIR_FREE(info); @@ -19389,7 +19389,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -19435,7 +19435,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); cleanup: VIR_FREE(info); @@ -19485,7 +19485,7 @@ qemuDomainSetVcpu(virDomainPtr dom, if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) 
@@ -19512,7 +19512,7 @@ qemuDomainSetVcpu(virDomainPtr dom, ret = qemuDomainSetVcpuInternal(driver, vm, def, persistentDef, map, !!state); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virBitmapFree(map); @@ -19544,7 +19544,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19561,7 +19561,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && !src->nodestorage && - qemuBlockNodeNamesDetect(vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockNodeNamesDetect(vm, VIR_ASYNC_JOB_NONE) < 0) goto endjob; if (!src->nodestorage) { @@ -19581,7 +19581,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19639,7 +19639,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -19670,7 +19670,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19763,7 +19763,7 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm, virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) return -1; qemuDomainObjEnterMonitor(vm); @@ -19783,7 +19783,7 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm, 
ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); return ret; } @@ -19966,7 +19966,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, if (virDomainGetGuestInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_QUERY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -20014,10 +20014,10 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endagentjob: - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); if (nfs > 0) { - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20028,7 +20028,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuAgentFSInfoFormatParams(agentfsinfo, nfs, vm->def, params, nparams, &maxparams); endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } cleanup: diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c index 7b626ee383..6927739501 100644 --- a/src/qemu/qemu_hotplug.c +++ b/src/qemu/qemu_hotplug.c @@ -323,7 +323,7 @@ qemuDomainChangeMediaLegacy(virDomainObjPtr vm, int qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virJSONValue) props = NULL; @@ -368,7 +368,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, */ int qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; @@ -405,7 +405,7 @@ qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, static int qemuHotplugAttachManagedPR(virDomainObjPtr vm, virStorageSourcePtr src, - qemuDomainAsyncJob asyncJob) + 
virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; virJSONValuePtr props = NULL; @@ -453,7 +453,7 @@ qemuHotplugAttachManagedPR(virDomainObjPtr vm, */ static int qemuHotplugRemoveManagedPR(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; virErrorPtr orig_err; @@ -618,7 +618,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0) goto cleanup; - if (qemuHotplugAttachManagedPR(vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachManagedPR(vm, newsrc, VIR_ASYNC_JOB_NONE) < 0) goto cleanup; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) @@ -654,7 +654,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, /* remove PR manager object if unneeded */ if (managedpr) - ignore_value(qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuHotplugRemoveManagedPR(vm, VIR_ASYNC_JOB_NONE)); /* revert old image do the disk definition */ if (oldsrc) @@ -716,7 +716,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->disks, vm->def->ndisks + 1) < 0) goto cleanup; - if (qemuHotplugAttachManagedPR(vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachManagedPR(vm, disk->src, VIR_ASYNC_JOB_NONE) < 0) goto cleanup; qemuDomainObjEnterMonitor(vm); @@ -779,7 +779,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, ret = -2; if (virStorageSourceChainHasManagedPR(disk->src) && - qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuHotplugRemoveManagedPR(vm, VIR_ASYNC_JOB_NONE) < 0) ret = -2; virDomainAuditDisk(vm, NULL, disk->src, "attach", false); @@ -1665,7 +1665,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, void qemuDomainDelTLSObjects(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, const char *secAlias, const char *tlsAlias) { @@ -1695,7 +1695,7 @@ 
qemuDomainDelTLSObjects(virDomainObjPtr vm, int qemuDomainAddTLSObjects(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virJSONValuePtr *secProps, virJSONValuePtr *tlsProps) { @@ -1802,7 +1802,7 @@ qemuDomainAddChardevTLSObjects(virQEMUDriverPtr driver, goto cleanup; dev->data.tcp.tlscreds = true; - if (qemuDomainAddTLSObjects(vm, QEMU_ASYNC_JOB_NONE, + if (qemuDomainAddTLSObjects(vm, VIR_ASYNC_JOB_NONE, &secProps, &tlsProps) < 0) goto cleanup; @@ -1922,7 +1922,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, VIR_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2202,7 +2202,7 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, VIR_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2317,7 +2317,7 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, releaseaddr = false; virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, VIR_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2415,13 +2415,13 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, virObjectEventStateQueue(driver->domainEventState, event); /* fix the balloon size */ - ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuProcessRefreshBalloonState(vm, VIR_ASYNC_JOB_NONE)); /* mem is consumed by vm->def */ mem = NULL; /* this step is best effort, removing the device would be so much trouble */ - ignore_value(qemuDomainUpdateMemoryDeviceInfo(vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuDomainUpdateMemoryDeviceInfo(vm, VIR_ASYNC_JOB_NONE)); ret = 0; @@ -4099,7 +4099,7 @@ 
qemuDomainChangeGraphics(virQEMUDriverPtr driver, if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC, &dev->data.vnc.auth, cfg->vncPassword, - QEMU_ASYNC_JOB_NONE) < 0) + VIR_ASYNC_JOB_NONE) < 0) return -1; /* Steal the new dev's char * reference */ @@ -4146,7 +4146,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver, if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE, &dev->data.spice.auth, cfg->spicePassword, - QEMU_ASYNC_JOB_NONE) < 0) + VIR_ASYNC_JOB_NONE) < 0) return -1; /* Steal the new dev's char * reference */ @@ -4281,7 +4281,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, ignore_value(qemuRemoveSharedDevice(driver, &dev, vm->def->name)); if (virStorageSourceChainHasManagedPR(disk->src) && - qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuHotplugRemoveManagedPR(vm, VIR_ASYNC_JOB_NONE) < 0) goto cleanup; ret = 0; @@ -4358,7 +4358,7 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver, virDomainMemoryDefFree(mem); /* fix the balloon size */ - ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuProcessRefreshBalloonState(vm, VIR_ASYNC_JOB_NONE)); /* decrease the mlock limit after memory unplug if necessary */ ignore_value(qemuDomainAdjustMaxMemLock(vm, false)); @@ -5903,7 +5903,7 @@ qemuDomainRemoveVcpu(virDomainObjPtr vm, virErrorPtr save_error = NULL; size_t i; - if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, VIR_ASYNC_JOB_NONE, false) < 0) return -1; /* validation requires us to set the expected state prior to calling it */ @@ -6052,7 +6052,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, if (newhotplug) vm->def->individualvcpus = true; - if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, VIR_ASYNC_JOB_NONE, false) < 0) goto cleanup; /* validation requires us to set the expected state prior to calling it */ diff --git 
a/src/qemu/qemu_hotplug.h b/src/qemu/qemu_hotplug.h index 51af92f840..3618af87c7 100644 --- a/src/qemu/qemu_hotplug.h +++ b/src/qemu/qemu_hotplug.h @@ -32,12 +32,12 @@ int qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, bool force); void qemuDomainDelTLSObjects(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, const char *secAlias, const char *tlsAlias); int qemuDomainAddTLSObjects(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virJSONValuePtr *secProps, virJSONValuePtr *tlsProps); @@ -146,7 +146,7 @@ unsigned long long qemuDomainGetUnplugTimeout(virDomainObjPtr vm) G_GNUC_NO_INLI int qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 4fa2e4cf62..b29f3130b7 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -83,7 +83,7 @@ VIR_ENUM_IMPL(virMigrationJobPhase, static int qemuMigrationJobStart(virDomainObjPtr vm, - qemuDomainAsyncJob job, + virDomainAsyncJob job, unsigned long apiFlags) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; @@ -103,7 +103,7 @@ qemuMigrationJobContinue(virDomainObjPtr obj) static bool qemuMigrationJobIsActive(virDomainObjPtr vm, - qemuDomainAsyncJob job) + virDomainAsyncJob job) ATTRIBUTE_NONNULL(1); static void @@ -148,7 +148,7 @@ qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm) /* we got here through some sort of failure; start the domain again */ if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_MIGRATION_CANCELED, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) { + VIR_ASYNC_JOB_MIGRATION_OUT) < 0) { /* Hm, we already know we are in error here. 
We don't want to * overwrite the previous error, though, so we just throw something * to the logs and hope for the best */ @@ -420,7 +420,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, devicename = diskAlias; } - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) goto cleanup; if (port == 0) { @@ -463,7 +463,7 @@ qemuMigrationDstStopNBDServer(virDomainObjPtr vm, if (!mig->nbd) return 0; - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) return -1; if (qemuMonitorNBDServerStop(priv->mon) < 0) @@ -505,7 +505,7 @@ qemuMigrationNBDReportMirrorError(qemuBlockJobDataPtr job, */ static int qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { size_t i; size_t notReady = 0; @@ -559,7 +559,7 @@ qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm, */ static int qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, bool check) { size_t i; @@ -643,7 +643,7 @@ qemuMigrationSrcNBDCopyCancelOne(virDomainObjPtr vm, virDomainDiskDefPtr disk, qemuBlockJobDataPtr job, bool failNoJob, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int rv; @@ -688,7 +688,7 @@ qemuMigrationSrcNBDCopyCancelOne(virDomainObjPtr vm, static int qemuMigrationSrcNBDCopyCancel(virDomainObjPtr vm, bool check, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virConnectPtr dconn) { virErrorPtr err = NULL; @@ -836,7 +836,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virDomainObjPtr vm, false))) return -1; - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0) return -1; mon_ret = 
qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data); @@ -877,7 +877,7 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virDomainObjPtr vm, diskAlias); } - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0) return -1; mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm), @@ -1032,14 +1032,14 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, } } - while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) { + while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) != 1) { if (rv < 0) return -1; if (priv->job.abortJob) { - jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + virDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); return -1; } @@ -1054,7 +1054,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, return -1; } - qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationSrcFetchMirrorStats(vm, VIR_ASYNC_JOB_MIGRATION_OUT, jobPriv->current); /* Okay, all disks are ready. 
Modify migrate_flags */ @@ -1406,7 +1406,7 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver, if (state == VIR_DOMAIN_RUNNING) { if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_POSTCOPY_FAILED, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + VIR_ASYNC_JOB_MIGRATION_IN) < 0) VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name); } else { virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, @@ -1438,31 +1438,31 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) { switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) { case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_POSTCOPY; break; case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED; break; case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_NONE; break; case QEMU_MONITOR_MIGRATION_STATUS_ERROR: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED; break; case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_CANCELED; break; case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_PAUSED; break; case QEMU_MONITOR_MIGRATION_STATUS_DEVICE: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_MIGRATING; break; case QEMU_MONITOR_MIGRATION_STATUS_SETUP: @@ -1477,7 +1477,7 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) int qemuMigrationAnyFetchStats(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo, char **error) { @@ -1505,23 +1505,23 @@ qemuMigrationJobName(virDomainObjPtr vm) 
qemuDomainObjPrivatePtr priv = vm->privateData; switch (priv->job.asyncJob) { - case QEMU_ASYNC_JOB_MIGRATION_OUT: + case VIR_ASYNC_JOB_MIGRATION_OUT: return _("migration out job"); - case QEMU_ASYNC_JOB_SAVE: + case VIR_ASYNC_JOB_SAVE: return _("domain save job"); - case QEMU_ASYNC_JOB_DUMP: + case VIR_ASYNC_JOB_DUMP: return _("domain core dump job"); - case QEMU_ASYNC_JOB_NONE: + case VIR_ASYNC_JOB_NONE: return _("undefined"); - case QEMU_ASYNC_JOB_MIGRATION_IN: + case VIR_ASYNC_JOB_MIGRATION_IN: return _("migration in job"); - case QEMU_ASYNC_JOB_SNAPSHOT: + case VIR_ASYNC_JOB_SNAPSHOT: return _("snapshot job"); - case QEMU_ASYNC_JOB_START: + case VIR_ASYNC_JOB_START: return _("start job"); - case QEMU_ASYNC_JOB_BACKUP: + case VIR_ASYNC_JOB_BACKUP: return _("backup job"); - case QEMU_ASYNC_JOB_LAST: + case VIR_ASYNC_JOB_LAST: default: return _("job"); } @@ -1530,7 +1530,7 @@ qemuMigrationJobName(virDomainObjPtr vm) static int qemuMigrationJobCheckStatus(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; @@ -1548,28 +1548,28 @@ qemuMigrationJobCheckStatus(virDomainObjPtr vm, qemuMigrationUpdateJobType(jobInfo); switch (jobInfo->status) { - case QEMU_DOMAIN_JOB_STATUS_NONE: + case VIR_DOMAIN_JOB_STATUS_NONE: virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), qemuMigrationJobName(vm), _("is not active")); goto cleanup; - case QEMU_DOMAIN_JOB_STATUS_FAILED: + case VIR_DOMAIN_JOB_STATUS_FAILED: virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), qemuMigrationJobName(vm), error ? 
error : _("unexpectedly failed")); goto cleanup; - case QEMU_DOMAIN_JOB_STATUS_CANCELED: + case VIR_DOMAIN_JOB_STATUS_CANCELED: virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuMigrationJobName(vm), _("canceled by client")); goto cleanup; - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: + case VIR_DOMAIN_JOB_STATUS_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_ACTIVE: + case VIR_DOMAIN_JOB_STATUS_MIGRATING: + case VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_POSTCOPY: + case VIR_DOMAIN_JOB_STATUS_PAUSED: break; } @@ -1598,7 +1598,7 @@ enum qemuMigrationCompletedFlags { */ static int qemuMigrationAnyCompleted(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virConnectPtr dconn, unsigned int flags) { @@ -1634,7 +1634,7 @@ qemuMigrationAnyCompleted(virDomainObjPtr vm, * wait again for the real end of the migration. */ if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER && - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { + jobInfo->status == VIR_DOMAIN_JOB_STATUS_PAUSED) { VIR_DEBUG("Migration paused before switchover"); return 1; } @@ -1644,38 +1644,38 @@ qemuMigrationAnyCompleted(virDomainObjPtr vm, * will continue waiting until the migrate state changes to completed. 
*/ if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY && - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + jobInfo->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) { VIR_DEBUG("Migration switched to post-copy"); return 1; } - if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) + if (jobInfo->status == VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED) return 1; else return 0; error: switch (jobInfo->status) { - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: + case VIR_DOMAIN_JOB_STATUS_MIGRATING: + case VIR_DOMAIN_JOB_STATUS_POSTCOPY: + case VIR_DOMAIN_JOB_STATUS_PAUSED: /* The migration was aborted by us rather than QEMU itself. */ - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED; return -2; - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED: /* Something failed after QEMU already finished the migration. */ - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED; return -1; - case QEMU_DOMAIN_JOB_STATUS_FAILED: - case QEMU_DOMAIN_JOB_STATUS_CANCELED: + case VIR_DOMAIN_JOB_STATUS_FAILED: + case VIR_DOMAIN_JOB_STATUS_CANCELED: /* QEMU aborted the migration. */ return -1; - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_NONE: + case VIR_DOMAIN_JOB_STATUS_ACTIVE: + case VIR_DOMAIN_JOB_STATUS_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_NONE: /* Impossible. 
*/ break; } @@ -1689,7 +1689,7 @@ qemuMigrationAnyCompleted(virDomainObjPtr vm, */ static int qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virConnectPtr dconn, unsigned int flags) { @@ -1699,7 +1699,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int rv; - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_MIGRATING; while ((rv = qemuMigrationAnyCompleted(vm, asyncJob, dconn, flags)) != 1) { @@ -1709,7 +1709,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, if (events) { if (virDomainObjWait(vm) < 0) { if (virDomainObjIsActive(vm)) - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_FAILED; return -2; } } else { @@ -1729,11 +1729,11 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, qemuDomainJobInfoUpdateDowntime(jobInfo); g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); jobPriv->completed = qemuDomainJobInfoCopy(jobInfo); - jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobPriv->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; - if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT && + jobInfo->status == VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED) + jobInfo->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; return 0; } @@ -1741,7 +1741,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, static int qemuMigrationDstWaitForCompletion(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, bool postcopy) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -1849,7 +1849,7 @@ qemuMigrationSrcGraphicsRelocate(virDomainObjPtr vm, goto cleanup; } - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 
0) { + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress, @@ -1942,7 +1942,7 @@ qemuMigrationDstGetURI(const char *migrateFrom, int qemuMigrationDstRun(virDomainObjPtr vm, const char *uri, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int rv; @@ -1963,7 +1963,7 @@ qemuMigrationDstRun(virDomainObjPtr vm, if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; - if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) { + if (asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) { /* qemuMigrationDstWaitForCompletion is called from the Finish phase */ return 0; } @@ -1991,11 +1991,11 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s", vm->def->name, conn, - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, + virDomainAsyncJobTypeToString(priv->job.asyncJob), + virDomainAsyncJobPhaseToString(priv->job.asyncJob, priv->job.phase)); - if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) + if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) return; VIR_DEBUG("The connection which started outgoing migration of domain %s" @@ -2005,17 +2005,17 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, switch ((virMigrationJobPhase) priv->job.phase) { case VIR_MIGRATION_PHASE_BEGIN3: /* just forget we were about to migrate */ - qemuDomainObjDiscardAsyncJob(vm, &priv->job); + virDomainObjDiscardAsyncJob(vm, &priv->job); break; case VIR_MIGRATION_PHASE_PERFORM3_DONE: VIR_WARN("Migration of domain %s finished but we don't know if the" " domain was successfully started on destination or not", vm->def->name); - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); /* clear the 
job and let higher levels decide what to do */ - qemuDomainObjDiscardAsyncJob(vm, &priv->job); + virDomainObjDiscardAsyncJob(vm, &priv->job); break; case VIR_MIGRATION_PHASE_PERFORM3: @@ -2061,11 +2061,11 @@ qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver, cookieout, cookieoutlen, nmigrate_disks, migrate_disks, flags); - /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT. + /* Only set the phase if we are inside VIR_ASYNC_JOB_MIGRATION_OUT. * Otherwise we will start the async job later in the perform phase losing * change protection. */ - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_BEGIN3); if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags)) @@ -2206,17 +2206,17 @@ qemuMigrationSrcBegin(virConnectPtr conn, virQEMUDriverPtr driver = conn->privateData; qemuDomainObjPrivatePtr priv = vm->privateData; char *xml = NULL; - qemuDomainAsyncJob asyncJob; + virDomainAsyncJob asyncJob; if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) { - if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_OUT, flags) < 0) goto cleanup; - asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT; + asyncJob = VIR_ASYNC_JOB_MIGRATION_OUT; } else { - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; - asyncJob = QEMU_ASYNC_JOB_NONE; + asyncJob = VIR_ASYNC_JOB_NONE; } qemuMigrationSrcStoreDomainState(vm); @@ -2259,7 +2259,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, if (flags & VIR_MIGRATE_CHANGE_PROTECTION) qemuMigrationJobFinish(vm); else - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); goto cleanup; } @@ -2276,15 +2276,15 @@ qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver, VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s", driver, vm->def->name, - 
qemuDomainJobTypeToString(priv->job.active), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + virDomainJobTypeToString(priv->job.active), + virDomainAsyncJobTypeToString(priv->job.asyncJob)); virPortAllocatorRelease(priv->migrationPort); priv->migrationPort = 0; - if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) + if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN)) return; - qemuDomainObjDiscardAsyncJob(vm, &priv->job); + virDomainObjDiscardAsyncJob(vm, &priv->job); } static qemuProcessIncomingDefPtr @@ -2523,7 +2523,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0) goto cleanup; - if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_IN, flags) < 0) + if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_IN, flags) < 0) goto cleanup; qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PREPARE); @@ -2539,7 +2539,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY; - if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuProcessInit(driver, vm, mig->cpu, VIR_ASYNC_JOB_MIGRATION_IN, true, startFlags) < 0) goto stopjob; stopProcess = true; @@ -2557,7 +2557,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, if (qemuProcessPrepareHost(driver, vm, startFlags) < 0) goto stopjob; - rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + rv = qemuProcessLaunch(dconn, driver, vm, VIR_ASYNC_JOB_MIGRATION_IN, incoming, NULL, VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START, startFlags); @@ -2582,7 +2582,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, goto stopjob; } - if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationParamsCheck(vm, VIR_ASYNC_JOB_MIGRATION_IN, migParams, mig->caps->automatic) < 0) goto stopjob; @@ -2590,7 +2590,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, * set the migration TLS parameters */ if (flags & VIR_MIGRATE_TLS) { if 
(qemuMigrationParamsEnableTLS(driver, vm, true, - QEMU_ASYNC_JOB_MIGRATION_IN, + VIR_ASYNC_JOB_MIGRATION_IN, &tlsAlias, NULL, migParams) < 0) goto stopjob; @@ -2599,7 +2599,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, goto stopjob; } - if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_IN, migParams) < 0) goto stopjob; @@ -2637,10 +2637,10 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, if (incoming->deferredURI && qemuMigrationDstRun(vm, incoming->deferredURI, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + VIR_ASYNC_JOB_MIGRATION_IN) < 0) goto stopjob; - if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuProcessFinishStartup(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN, false, VIR_DOMAIN_PAUSED_MIGRATION) < 0) goto stopjob; @@ -2706,7 +2706,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, return ret; stopjob: - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN, jobPriv->migParams, priv->job.apiFlags); if (stopProcess) { @@ -2715,7 +2715,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL; virDomainAuditStart(vm, "migrated", false); qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags); + VIR_ASYNC_JOB_MIGRATION_IN, stopFlags); } qemuMigrationJobFinish(vm); @@ -3010,7 +3010,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, */ if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY && - qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationAnyFetchStats(vm, VIR_ASYNC_JOB_MIGRATION_OUT, jobInfo, NULL) < 0) VIR_WARN("Could not refresh migration statistics"); @@ -3033,7 +3033,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, qemuMigrationSrcWaitForSpice(vm); qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED, - 
QEMU_ASYNC_JOB_MIGRATION_OUT, + VIR_ASYNC_JOB_MIGRATION_OUT, VIR_QEMU_PROCESS_STOP_MIGRATED); virDomainAuditStop(vm, "migrated"); @@ -3049,7 +3049,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); /* cancel any outstanding NBD jobs */ - qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_MIGRATION_OUT, NULL); + qemuMigrationSrcNBDCopyCancel(vm, false, VIR_ASYNC_JOB_MIGRATION_OUT, NULL); virErrorRestore(&orig_err); @@ -3059,7 +3059,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, else qemuMigrationSrcRestoreDomainState(driver, vm); - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) @@ -3083,7 +3083,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, cfg = virQEMUDriverGetConfig(driver); - if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) + if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) goto cleanup; if (cancelled) @@ -3387,7 +3387,7 @@ qemuMigrationSrcConnect(virQEMUDriverPtr driver, static int qemuMigrationSrcContinue(virDomainObjPtr vm, qemuMonitorMigrationStatus status, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; @@ -3413,10 +3413,10 @@ qemuMigrationSetDBusVMState(virQEMUDriverPtr driver, if (virStringListLength((const char **)priv->dbusVMStateIds) > 0) { int rv; - if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachDBusVMState(driver, vm, VIR_ASYNC_JOB_NONE) < 0) return -1; - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_NONE) < 0) return -1; rv = qemuMonitorSetDBusVMStateIdList(priv->mon, @@ -3427,7 +3427,7 @@ qemuMigrationSetDBusVMState(virQEMUDriverPtr driver, return rv; } else { - if 
(qemuHotplugRemoveDBusVMState(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugRemoveDBusVMState(vm, VIR_ASYNC_JOB_NONE) < 0) return -1; } @@ -3530,7 +3530,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (qemuMigrationSrcGraphicsRelocate(vm, mig, graphicsuri) < 0) VIR_WARN("unable to provide data for graphics client relocation"); - if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationParamsCheck(vm, VIR_ASYNC_JOB_MIGRATION_OUT, migParams, mig->caps->automatic) < 0) goto error; @@ -3544,7 +3544,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, hostname = spec->dest.host.name; if (qemuMigrationParamsEnableTLS(driver, vm, false, - QEMU_ASYNC_JOB_MIGRATION_OUT, + VIR_ASYNC_JOB_MIGRATION_OUT, &tlsAlias, hostname, migParams) < 0) goto error; @@ -3558,7 +3558,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, migrate_speed * 1024 * 1024) < 0) goto error; - if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationParamsApply(vm, VIR_ASYNC_JOB_MIGRATION_OUT, migParams) < 0) goto error; @@ -3601,20 +3601,20 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (!(flags & VIR_MIGRATE_LIVE) && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + VIR_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; } - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; if (priv->job.abortJob) { /* explicitly do this *after* we entered the monitor, * as this is a critical section so we are guaranteed * priv->job.abortJob will not change */ - jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + virDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by 
client")); goto exit_monitor; } @@ -3685,7 +3685,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (flags & VIR_MIGRATE_POSTCOPY) waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY; - rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + rc = qemuMigrationSrcWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_OUT, dconn, waitFlags); if (rc == -2) { goto error; @@ -3708,7 +3708,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (mig->nbd && qemuMigrationSrcNBDCopyCancel(vm, true, - QEMU_ASYNC_JOB_MIGRATION_OUT, + VIR_ASYNC_JOB_MIGRATION_OUT, dconn) < 0) goto error; @@ -3716,14 +3716,14 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, * resume it now once we finished all block jobs and wait for the real * end of the migration. */ - if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { + if (jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) { if (qemuMigrationSrcContinue(vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + VIR_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER; - rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + rc = qemuMigrationSrcWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_OUT, dconn, waitFlags); if (rc == -2) { goto error; @@ -3774,8 +3774,8 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (virDomainObjIsActive(vm)) { if (cancel && - jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && - qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { + jobPriv->current->status != VIR_DOMAIN_JOB_STATUS_QEMU_COMPLETED && + qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuMonitorMigrateCancel(priv->mon); ignore_value(qemuDomainObjExitMonitor(vm)); } @@ -3783,11 +3783,11 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, /* cancel any outstanding NBD jobs */ if (mig && mig->nbd) qemuMigrationSrcNBDCopyCancel(vm, false, - QEMU_ASYNC_JOB_MIGRATION_OUT, + 
VIR_ASYNC_JOB_MIGRATION_OUT, dconn); - if (jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) - jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + if (jobPriv->current->status != VIR_DOMAIN_JOB_STATUS_CANCELED) + jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_FAILED; } if (iothread) @@ -4634,7 +4634,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) + if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_OUT, flags) < 0) goto cleanup; if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0) @@ -4672,7 +4672,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, */ if (!v3proto) { qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED, - QEMU_ASYNC_JOB_MIGRATION_OUT, + VIR_ASYNC_JOB_MIGRATION_OUT, VIR_QEMU_PROCESS_STOP_MIGRATED); virDomainAuditStop(vm, "migrated"); event = virDomainEventLifecycleNewFromObj(vm, @@ -4688,7 +4688,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, * here */ if (!v3proto && ret < 0) - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); qemuMigrationSrcRestoreDomainState(driver, vm); @@ -4735,10 +4735,10 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, /* If we didn't start the job in the begin phase, start it now. 
*/ if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) { - if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationJobStart(vm, VIR_ASYNC_JOB_MIGRATION_OUT, flags) < 0) return ret; - } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) { + } else if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT)) { return ret; } @@ -4764,7 +4764,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, endjob: if (ret < 0) { - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); qemuMigrationJobFinish(vm); } else { @@ -4981,7 +4981,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, port = priv->migrationPort; priv->migrationPort = 0; - if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) { + if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN)) { qemuMigrationDstErrorReport(driver, vm->def->name); goto cleanup; } @@ -5017,7 +5017,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* Check for a possible error on the monitor in case Finish was called * earlier than monitor EOF handler got a chance to process the error */ - qemuDomainCheckMonitor(vm, QEMU_ASYNC_JOB_MIGRATION_IN); + qemuDomainCheckMonitor(vm, VIR_ASYNC_JOB_MIGRATION_IN); goto endjob; } @@ -5038,7 +5038,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, goto endjob; if (qemuRefreshVirtioChannelState(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + VIR_ASYNC_JOB_MIGRATION_IN) < 0) goto endjob; if (qemuConnectAgent(driver, vm) < 0) @@ -5066,7 +5066,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* We need to wait for QEMU to process all data sent by the source * before starting guest CPUs. 
*/ - if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationDstWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_IN, !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) { /* There's not much we can do for v2 protocol since the * original domain on the source host is already gone. @@ -5077,14 +5077,14 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* Now that the state data was transferred we can refresh the actual state * of the devices */ - if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) { + if (qemuProcessRefreshState(driver, vm, VIR_ASYNC_JOB_MIGRATION_IN) < 0) { /* Similarly to the case above v2 protocol will not be able to recover * from this. Let's ignore this and perhaps stuff will not break. */ if (v3proto) goto endjob; } - if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) inPostCopy = true; if (!(flags & VIR_MIGRATE_PAUSED)) { @@ -5095,7 +5095,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, if (qemuProcessStartCPUs(driver, vm, inPostCopy ? 
VIR_DOMAIN_RUNNING_POSTCOPY : VIR_DOMAIN_RUNNING_MIGRATED, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) { + VIR_ASYNC_JOB_MIGRATION_IN) < 0) { if (virGetLastErrorCode() == VIR_ERR_OK) virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("resume operation failed")); @@ -5134,7 +5134,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, } if (inPostCopy) { - if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationDstWaitForCompletion(vm, VIR_ASYNC_JOB_MIGRATION_IN, false) < 0) { goto endjob; } @@ -5183,7 +5183,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, virDomainObjIsActive(vm)) { if (doKill) { qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - QEMU_ASYNC_JOB_MIGRATION_IN, + VIR_ASYNC_JOB_MIGRATION_IN, VIR_QEMU_PROCESS_STOP_MIGRATED); virDomainAuditStop(vm, "failed"); event = virDomainEventLifecycleNewFromObj(vm, @@ -5198,8 +5198,8 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, if (dom) { if (jobInfo) { jobPriv->completed = g_steal_pointer(&jobInfo); - jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; - jobPriv->completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + jobPriv->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; + jobPriv->completed->statsType = VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION; } if (qemuMigrationBakeCookie(mig, driver, vm, @@ -5215,7 +5215,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); } - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN, jobPriv->migParams, priv->job.apiFlags); qemuMigrationJobFinish(vm); @@ -5245,7 +5245,7 @@ int qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, int fd, virCommandPtr compressor, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH); @@ -5426,7 +5426,7 @@ 
qemuMigrationSrcCancel(virDomainObjPtr vm) if (storage && qemuMigrationSrcNBDCopyCancel(vm, false, - QEMU_ASYNC_JOB_NONE, NULL) < 0) + VIR_ASYNC_JOB_NONE, NULL) < 0) return -1; return 0; @@ -5435,7 +5435,7 @@ qemuMigrationSrcCancel(virDomainObjPtr vm) static int qemuMigrationJobStart(virDomainObjPtr vm, - qemuDomainAsyncJob job, + virDomainAsyncJob job, unsigned long apiFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5443,22 +5443,22 @@ qemuMigrationJobStart(virDomainObjPtr vm, virDomainJobOperation op; unsigned long long mask; - if (job == QEMU_ASYNC_JOB_MIGRATION_IN) { + if (job == VIR_ASYNC_JOB_MIGRATION_IN) { op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN; - mask = QEMU_JOB_NONE; + mask = VIR_JOB_NONE; } else { op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT; - mask = QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MIGRATION_OP); + mask = VIR_JOB_DEFAULT_MASK | + JOB_MASK(VIR_JOB_SUSPEND) | + JOB_MASK(VIR_JOB_MIGRATION_OP); } - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, job, op, apiFlags) < 0) + if (virDomainObjBeginAsyncJob(vm, &priv->job, job, op, apiFlags) < 0) return -1; - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_MIGRATION; - qemuDomainObjSetAsyncJobMask(&priv->job, mask); + virDomainObjSetAsyncJobMask(&priv->job, mask); return 0; } @@ -5475,7 +5475,7 @@ qemuMigrationJobSetPhase(virDomainObjPtr vm, return; } - qemuDomainObjSetJobPhase(vm, &priv->job, phase); + virDomainObjSetJobPhase(vm, &priv->job, phase); } static void @@ -5489,19 +5489,19 @@ static void qemuMigrationJobContinue(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainObjReleaseAsyncJob(&priv->job); + virDomainObjReleaseAsyncJob(&priv->job); } static bool qemuMigrationJobIsActive(virDomainObjPtr vm, - qemuDomainAsyncJob job) + virDomainAsyncJob job) { qemuDomainObjPrivatePtr priv = vm->privateData; if (priv->job.asyncJob != job) { const char 
*msg; - if (job == QEMU_ASYNC_JOB_MIGRATION_IN) + if (job == VIR_ASYNC_JOB_MIGRATION_IN) msg = _("domain '%s' is not processing incoming migration"); else msg = _("domain '%s' is not being migrated"); @@ -5516,7 +5516,7 @@ static void qemuMigrationJobFinish(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); } @@ -5574,7 +5574,7 @@ qemuMigrationDstErrorReport(virQEMUDriverPtr driver, int qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo) { size_t i; diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 8f5e2d0f81..da087671cc 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -191,7 +191,7 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, int fd, virCommandPtr compressor, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; int @@ -199,7 +199,7 @@ qemuMigrationSrcCancel(virDomainObjPtr vm); int qemuMigrationAnyFetchStats(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo, char **error); @@ -226,7 +226,7 @@ qemuMigrationDstGetURI(const char *migrateFrom, int qemuMigrationDstRun(virDomainObjPtr vm, const char *uri, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); void qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver, @@ -234,5 +234,5 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver, int qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo); diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index 68f4735bc7..ea43060c4d 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -1051,7 +1051,7 @@ 
qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) jobInfo = g_new0(qemuDomainJobInfo, 1); stats = &jobInfo->stats.mig; - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobInfo->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started); virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped); diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 12f94098c5..c8f835f8d8 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -810,7 +810,7 @@ qemuMigrationParamsApply(virDomainObjPtr vm, if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; - if (asyncJob == QEMU_ASYNC_JOB_NONE) { + if (asyncJob == VIR_ASYNC_JOB_NONE) { if (!virBitmapIsAllClear(migParams->caps)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Migration capabilities can only be set by " @@ -1118,7 +1118,7 @@ qemuMigrationParamsCheck(virDomainObjPtr vm, qemuMigrationParty party; size_t i; - if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) + if (asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) party = QEMU_MIGRATION_SOURCE; else party = QEMU_MIGRATION_DESTINATION; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index b394bcbd3f..ca03486bc4 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -422,7 +422,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, if (vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY || vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE) { - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -432,11 +432,11 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, } qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED, - QEMU_ASYNC_JOB_NONE, 0); + VIR_ASYNC_JOB_NONE, 0); virDomainAuditStop(vm, "destroyed"); qemuDomainRemoveInactive(driver, vm); 
endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); } ret = 0; @@ -467,7 +467,7 @@ qemuProcessFakeReboot(void *opaque) VIR_DEBUG("vm=%p", vm); virObjectLock(vm); - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -490,7 +490,7 @@ qemuProcessFakeReboot(void *opaque) if (qemuProcessStartCPUs(driver, vm, reason, - QEMU_ASYNC_JOB_NONE) < 0) { + VIR_ASYNC_JOB_NONE) < 0) { if (virGetLastErrorCode() == VIR_ERR_OK) virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("resume operation failed")); @@ -505,7 +505,7 @@ qemuProcessFakeReboot(void *opaque) ret = 0; endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: priv->pausedShutdown = false; @@ -669,8 +669,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, * reveal it in domain state nor sent events */ if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING && !priv->pausedShutdown) { - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) { - if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) { + if (jobPriv->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) reason = VIR_DOMAIN_PAUSED_POSTCOPY; else reason = VIR_DOMAIN_PAUSED_MIGRATION; @@ -1630,7 +1630,7 @@ qemuProcessHandleSpiceMigrated(qemuMonitorPtr mon G_GNUC_UNUSED, priv = vm->privateData; jobPriv = priv->job.privateData; - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { + if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) { VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job"); goto cleanup; } @@ -1665,7 +1665,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, priv = vm->privateData; jobPriv = priv->job.privateData; - if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { + if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) { VIR_DEBUG("got 
MIGRATION event without a migration job"); goto cleanup; } @@ -1674,7 +1674,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainObjBroadcast(vm); if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY && - priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && + priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT && virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_MIGRATION) { VIR_DEBUG("Correcting paused state reason for domain %s to %s", @@ -1714,7 +1714,7 @@ qemuProcessHandleMigrationPass(qemuMonitorPtr mon G_GNUC_UNUSED, vm, vm->def->name, pass); priv = vm->privateData; - if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { + if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) { VIR_DEBUG("got MIGRATION_PASS event without a migration job"); goto cleanup; } @@ -1746,7 +1746,7 @@ qemuProcessHandleDumpCompleted(qemuMonitorPtr mon G_GNUC_UNUSED, priv = vm->privateData; jobPriv = priv->job.privateData; - if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { + if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) { VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job"); goto cleanup; } @@ -1949,7 +1949,7 @@ qemuProcessMonitorLogFree(void *opaque) static int qemuProcessInitMonitor(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { int ret; @@ -2249,7 +2249,7 @@ qemuProcessRefreshChannelVirtioState(virQEMUDriverPtr driver, int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; virHashTablePtr info = NULL; @@ -2586,7 +2586,7 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm G_GNUC_UNUSED) /* set link states to down on interfaces at qemu start */ static int qemuProcessSetLinkStates(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; virDomainDefPtr def = vm->def; @@ -3207,7 +3207,7 
@@ qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig, int qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainRunningReason reason, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { int ret = -1; qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3259,7 +3259,7 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, int qemuProcessStopCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainPausedReason reason, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { int ret = -1; qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3419,7 +3419,7 @@ qemuProcessUpdateState(virDomainObjPtr vm) static int qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, virDomainObjPtr vm, - const qemuDomainJobObj *job, + const virDomainJobObj *job, virDomainState state, int reason) { @@ -3454,7 +3454,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, vm->def->name); if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_MIGRATED, - QEMU_ASYNC_JOB_NONE) < 0) { + VIR_ASYNC_JOB_NONE) < 0) { VIR_WARN("Could not resume domain %s", vm->def->name); } break; @@ -3472,7 +3472,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, break; } - qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_NONE, jobPriv->migParams, job->apiFlags); return 0; } @@ -3480,7 +3480,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, static int qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, virDomainObjPtr vm, - const qemuDomainJobObj *job, + const virDomainJobObj *job, virDomainState state, int reason, unsigned int *stopFlags) @@ -3562,13 +3562,13 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, reason == VIR_DOMAIN_PAUSED_UNKNOWN)) { if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_MIGRATION_CANCELED, - QEMU_ASYNC_JOB_NONE) < 0) { + VIR_ASYNC_JOB_NONE) < 0) { VIR_WARN("Could not resume domain %s", vm->def->name); } } } - 
qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_NONE, jobPriv->migParams, job->apiFlags); return 0; } @@ -3576,7 +3576,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, static int qemuProcessRecoverJob(virQEMUDriverPtr driver, virDomainObjPtr vm, - const qemuDomainJobObj *job, + const virDomainJobObj *job, unsigned int *stopFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3588,21 +3588,21 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, state = virDomainObjGetState(vm, &reason); switch (job->asyncJob) { - case QEMU_ASYNC_JOB_MIGRATION_OUT: + case VIR_ASYNC_JOB_MIGRATION_OUT: if (qemuProcessRecoverMigrationOut(driver, vm, job, state, reason, stopFlags) < 0) return -1; break; - case QEMU_ASYNC_JOB_MIGRATION_IN: + case VIR_ASYNC_JOB_MIGRATION_IN: if (qemuProcessRecoverMigrationIn(driver, vm, job, state, reason) < 0) return -1; break; - case QEMU_ASYNC_JOB_SAVE: - case QEMU_ASYNC_JOB_DUMP: - case QEMU_ASYNC_JOB_SNAPSHOT: + case VIR_ASYNC_JOB_SAVE: + case VIR_ASYNC_JOB_DUMP: + case VIR_ASYNC_JOB_SNAPSHOT: qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorMigrateCancel(priv->mon)); if (qemuDomainObjExitMonitor(vm) < 0) @@ -3612,53 +3612,53 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, * recovering an async job, this function is run at startup * and must resume things using sync monitor connections. 
*/ if (state == VIR_DOMAIN_PAUSED && - ((job->asyncJob == QEMU_ASYNC_JOB_DUMP && + ((job->asyncJob == VIR_ASYNC_JOB_DUMP && reason == VIR_DOMAIN_PAUSED_DUMP) || - (job->asyncJob == QEMU_ASYNC_JOB_SAVE && + (job->asyncJob == VIR_ASYNC_JOB_SAVE && reason == VIR_DOMAIN_PAUSED_SAVE) || - (job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT && + (job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT && (reason == VIR_DOMAIN_PAUSED_SNAPSHOT || reason == VIR_DOMAIN_PAUSED_MIGRATION)) || reason == VIR_DOMAIN_PAUSED_UNKNOWN)) { if (qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_SAVE_CANCELED, - QEMU_ASYNC_JOB_NONE) < 0) { + VIR_ASYNC_JOB_NONE) < 0) { VIR_WARN("Could not resume domain '%s' after migration to file", vm->def->name); } } break; - case QEMU_ASYNC_JOB_START: + case VIR_ASYNC_JOB_START: /* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */ break; - case QEMU_ASYNC_JOB_BACKUP: + case VIR_ASYNC_JOB_BACKUP: ignore_value(virTimeMillisNow(&now)); /* Restore the config of the async job which is not persisted */ priv->jobs_queued++; - priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP; + priv->job.asyncJob = VIR_ASYNC_JOB_BACKUP; priv->job.asyncOwnerAPI = virThreadJobGet(); priv->job.asyncStarted = now; - qemuDomainObjSetAsyncJobMask(&priv->job, - (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MODIFY))); + virDomainObjSetAsyncJobMask(&priv->job, + (VIR_JOB_DEFAULT_MASK | + JOB_MASK(VIR_JOB_SUSPEND) | + JOB_MASK(VIR_JOB_MODIFY))); /* We reset the job parameters for backup so that the job will look * active. 
This is possible because we are able to recover the state * of blockjobs and also the backup job allows all sub-job types */ jobPriv->current = g_new0(qemuDomainJobInfo, 1); jobPriv->current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; - jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_BACKUP; + jobPriv->current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; jobPriv->current->started = now; break; - case QEMU_ASYNC_JOB_NONE: - case QEMU_ASYNC_JOB_LAST: + case VIR_ASYNC_JOB_NONE: + case VIR_ASYNC_JOB_LAST: break; } @@ -3666,36 +3666,36 @@ qemuProcessRecoverJob, return -1; /* In case any special handling is added for job type that has been ignored - * before, QEMU_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated + * before, VIR_DOMAIN_TRACK_JOBS (from virdomainjob.h) needs to be updated * for the job to be properly tracked in domain state XML. 
*/ switch (job->active) { - case QEMU_JOB_QUERY: + case VIR_JOB_QUERY: /* harmless */ break; - case QEMU_JOB_DESTROY: + case VIR_JOB_DESTROY: VIR_DEBUG("Domain %s should have already been destroyed", vm->def->name); return -1; - case QEMU_JOB_SUSPEND: + case VIR_JOB_SUSPEND: /* mostly harmless */ break; - case QEMU_JOB_MODIFY: + case VIR_JOB_MODIFY: /* XXX depending on the command we may be in an inconsistent state and * we should probably fall back to "monitor error" state and refuse to */ break; - case QEMU_JOB_MIGRATION_OP: - case QEMU_JOB_ABORT: - case QEMU_JOB_ASYNC: - case QEMU_JOB_ASYNC_NESTED: + case VIR_JOB_MIGRATION_OP: + case VIR_JOB_ABORT: + case VIR_JOB_ASYNC: + case VIR_JOB_ASYNC_NESTED: /* async job was already handled above */ - case QEMU_JOB_NONE: - case QEMU_JOB_LAST: + case VIR_JOB_NONE: + case VIR_JOB_LAST: break; } @@ -3715,7 +3715,7 @@ qemuProcessUpdateDevices(virQEMUDriverPtr driver, old = priv->qemuDevices; priv->qemuDevices = NULL; - if (qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainUpdateDeviceList(vm, VIR_ASYNC_JOB_NONE) < 0) goto cleanup; qemuDevices = (const char **)priv->qemuDevices; @@ -4191,7 +4191,7 @@ qemuProcessTranslateCPUFeatures(const char *name, static int qemuProcessFetchGuestCPU(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virCPUDataPtr *enabled, virCPUDataPtr *disabled) { @@ -4297,7 +4297,7 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm, static int qemuProcessUpdateAndVerifyCPU(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { virCPUDataPtr cpu = NULL; virCPUDataPtr disabled = NULL; @@ -4323,7 +4323,7 @@ qemuProcessUpdateAndVerifyCPU(virDomainObjPtr vm, static int qemuProcessFetchCPUDefinitions(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virDomainCapsCPUModelsPtr *cpuModels) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -4345,7 +4345,7 @@ 
qemuProcessFetchCPUDefinitions(virDomainObjPtr vm, static int qemuProcessUpdateCPU(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { g_autoptr(virCPUData) cpu = NULL; g_autoptr(virCPUData) disabled = NULL; @@ -4562,9 +4562,9 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps, /* - * This function starts a new QEMU_ASYNC_JOB_START async job. The user is + * This function starts a new VIR_ASYNC_JOB_START async job. The user is * responsible for calling qemuProcessEndJob to stop this job and for passing - * QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this + * VIR_ASYNC_JOB_START as @asyncJob argument to any function requiring this * parameter between qemuProcessBeginJob and qemuProcessEndJob. */ int @@ -4574,11 +4574,11 @@ qemuProcessBeginJob(virDomainObjPtr vm, { qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_START, + if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_START, operation, apiFlags) < 0) return -1; - qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE); + virDomainObjSetAsyncJobMask(&priv->job, VIR_JOB_NONE); return 0; } @@ -4587,7 +4587,7 @@ void qemuProcessEndJob(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); } @@ -5045,7 +5045,7 @@ qemuProcessSetupRawIO(virQEMUDriverPtr driver, static int qemuProcessSetupBalloon(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { unsigned long long balloon = vm->def->mem.cur_balloon; qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5517,7 +5517,7 @@ int qemuProcessInit(virQEMUDriverPtr driver, virDomainObjPtr vm, virCPUDefPtr updatedCPU, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, bool migration, unsigned int flags) { @@ -5910,7 +5910,7 @@ qemuProcessVcpusSortOrder(const void *a, static int 
qemuProcessSetupHotpluggableVcpus(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def); qemuDomainObjPrivatePtr priv = vm->privateData; @@ -6593,7 +6593,7 @@ qemuProcessGenID(virDomainObjPtr vm, */ static int qemuProcessSetupDiskThrottlingBlockdev(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; size_t i; @@ -6664,7 +6664,7 @@ int qemuProcessLaunch(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuProcessIncomingDefPtr incoming, virDomainMomentObjPtr snapshot, virNetDevVPortProfileOp vmop, @@ -7008,7 +7008,7 @@ qemuProcessLaunch(virConnectPtr conn, int qemuProcessRefreshState(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -7043,7 +7043,7 @@ qemuProcessRefreshState(virQEMUDriverPtr driver, int qemuProcessFinishStartup(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, bool startCPUs, virDomainPausedReason pausedReason) { @@ -7081,7 +7081,7 @@ qemuProcessStart(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, virCPUDefPtr updatedCPU, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, const char *migrateFrom, int migrateFd, const char *migratePath, @@ -7101,7 +7101,7 @@ qemuProcessStart(virConnectPtr conn, "migrateFrom=%s migrateFd=%d migratePath=%s " "snapshot=%p vmop=%d flags=0x%x", conn, driver, vm, vm->def->name, vm->def->id, - qemuDomainAsyncJobTypeToString(asyncJob), + virDomainAsyncJobTypeToString(asyncJob), NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath), snapshot, vmop, flags); @@ -7216,7 +7216,7 @@ qemuProcessCreatePretendCmd(virQEMUDriverPtr driver, if (jsonPropsValidation) buildflags = 
QEMU_BUILD_COMMANDLINE_VALIDATE_KEEP_JSON; - if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE, + if (qemuProcessInit(driver, vm, NULL, VIR_ASYNC_JOB_NONE, !!migrateURI, flags) < 0) return NULL; @@ -7276,7 +7276,7 @@ qemuProcessKill(virDomainObjPtr vm, unsigned int flags) */ int qemuProcessBeginStopJob(virDomainObjPtr vm, - qemuDomainJob job, + virDomainJob job, bool forceKill) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -7295,7 +7295,7 @@ qemuProcessBeginStopJob(virDomainObjPtr vm, /* Wake up anything waiting on domain condition */ virDomainObjBroadcast(vm); - if (qemuDomainObjBeginJob(vm, &priv->job, job) < 0) + if (virDomainObjBeginJob(vm, &priv->job, job) < 0) goto cleanup; ret = 0; @@ -7309,7 +7309,7 @@ qemuProcessBeginStopJob(virDomainObjPtr vm, void qemuProcessStop(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainShutoffReason reason, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, unsigned int flags) { int ret; @@ -7328,21 +7328,21 @@ void qemuProcessStop(virQEMUDriverPtr driver, vm, vm->def->name, vm->def->id, (long long)vm->pid, virDomainShutoffReasonTypeToString(reason), - qemuDomainAsyncJobTypeToString(asyncJob), + virDomainAsyncJobTypeToString(asyncJob), flags); /* This method is routinely used in clean up paths. Disable error * reporting so we don't squash a legit error. 
*/ virErrorPreserveLast(&orig_err); - if (asyncJob != QEMU_ASYNC_JOB_NONE) { - if (qemuDomainObjBeginNestedJob(vm, &priv->job, asyncJob) < 0) + if (asyncJob != VIR_ASYNC_JOB_NONE) { + if (virDomainObjBeginNestedJob(vm, &priv->job, asyncJob) < 0) goto cleanup; - } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE && + } else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE && priv->job.asyncOwner == virThreadSelfID() && - priv->job.active != QEMU_JOB_ASYNC_NESTED) { + priv->job.active != VIR_JOB_ASYNC_NESTED) { VIR_WARN("qemuProcessStop called without a nested job (async=%s)", - qemuDomainAsyncJobTypeToString(asyncJob)); + virDomainAsyncJobTypeToString(asyncJob)); } if (!virDomainObjIsActive(vm)) { @@ -7558,7 +7558,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, /* clean up a possible backup job */ if (priv->backup) - qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED); + qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED); qemuProcessRemoveDomainStatus(driver, vm); @@ -7641,8 +7641,8 @@ void qemuProcessStop(virQEMUDriverPtr driver, virDomainObjRemoveTransientDef(vm); endjob: - if (asyncJob != QEMU_ASYNC_JOB_NONE) - qemuDomainObjEndJob(vm, &priv->job); + if (asyncJob != VIR_ASYNC_JOB_NONE) + virDomainObjEndJob(vm, &priv->job); cleanup: virErrorRestore(&orig_err); @@ -7661,22 +7661,22 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn); - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) + if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; if (priv->job.asyncJob) { VIR_DEBUG("vm=%s has long-term job active, cancelling", dom->def->name); - qemuDomainObjDiscardAsyncJob(dom, &priv->job); + virDomainObjDiscardAsyncJob(dom, &priv->job); } VIR_DEBUG("Killing domain"); - if (qemuProcessBeginStopJob(dom, QEMU_JOB_DESTROY, true) < 0) + if (qemuProcessBeginStopJob(dom, VIR_JOB_DESTROY, true) < 0) return; qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED, - 
QEMU_ASYNC_JOB_NONE, stopFlags); + VIR_ASYNC_JOB_NONE, stopFlags); virDomainAuditStop(dom, "destroyed"); event = virDomainEventLifecycleNewFromObj(dom, @@ -7685,7 +7685,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, qemuDomainRemoveInactive(driver, dom); - qemuDomainObjEndJob(dom, &priv->job); + virDomainObjEndJob(dom, &priv->job); virObjectEventStateQueue(driver->domainEventState, event); } @@ -7719,7 +7719,7 @@ bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver, int qemuProcessRefreshDisks(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV); @@ -7775,7 +7775,7 @@ qemuProcessRefreshDisks(virDomainObjPtr vm, static int qemuProcessRefreshCPUMigratability(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; virDomainDefPtr def = vm->def; @@ -7833,7 +7833,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver, if (!vm->def->cpu) return 0; - if (qemuProcessRefreshCPUMigratability(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshCPUMigratability(vm, VIR_ASYNC_JOB_NONE) < 0) return -1; if (!(host = virQEMUDriverGetHostCPU(driver))) { @@ -7868,7 +7868,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver, if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0) return -1; - if (qemuProcessUpdateCPU(vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessUpdateCPU(vm, VIR_ASYNC_JOB_NONE) < 0) return -1; } else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) { /* We only try to fix CPUs when the libvirt/QEMU combo used to start @@ -8010,7 +8010,7 @@ qemuProcessReconnect(void *opaque) virQEMUDriverPtr driver = data->driver; virDomainObjPtr obj = data->obj; qemuDomainObjPrivatePtr priv; - qemuDomainJobObj oldjob; + virDomainJobObj oldjob; int state; int reason; g_autoptr(virQEMUDriverConfig) cfg = NULL; @@ -8025,13 +8025,13 @@ 
qemuProcessReconnect(void *opaque) VIR_FREE(data); priv = obj->privateData; - qemuDomainObjRestoreJob(&priv->job, &oldjob); - if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) + virDomainObjRestoreJob(&priv->job, &oldjob); + if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; cfg = virQEMUDriverGetConfig(driver); - if (qemuDomainObjBeginJob(obj, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(obj, &priv->job, VIR_JOB_MODIFY) < 0) goto error; jobStarted = true; @@ -8064,7 +8064,7 @@ qemuProcessReconnect(void *opaque) tryMonReconn = true; /* XXX check PID liveliness & EXE path */ - if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, retry, NULL) < 0) + if (qemuConnectMonitor(driver, obj, VIR_ASYNC_JOB_NONE, retry, NULL) < 0) goto error; priv->machineName = qemuDomainGetMachineName(obj); @@ -8164,12 +8164,12 @@ qemuProcessReconnect(void *opaque) if (qemuProcessRefreshCPU(driver, obj) < 0) goto error; - if (qemuDomainRefreshVcpuInfo(obj, QEMU_ASYNC_JOB_NONE, true) < 0) + if (qemuDomainRefreshVcpuInfo(obj, VIR_ASYNC_JOB_NONE, true) < 0) goto error; qemuDomainVcpuPersistOrder(obj->def); - if (qemuProcessDetectIOThreadPIDs(obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessDetectIOThreadPIDs(obj, VIR_ASYNC_JOB_NONE) < 0) goto error; if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0) @@ -8179,20 +8179,20 @@ qemuProcessReconnect(void *opaque) qemuProcessFiltersInstantiate(obj->def); - if (qemuProcessRefreshDisks(obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshDisks(obj, VIR_ASYNC_JOB_NONE) < 0) goto error; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && - qemuBlockNodeNamesDetect(obj, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockNodeNamesDetect(obj, VIR_ASYNC_JOB_NONE) < 0) goto error; - if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuRefreshVirtioChannelState(driver, obj, VIR_ASYNC_JOB_NONE) < 0) goto error; /* If querying of guest's RTC 
failed, report error, but do not kill the domain. */ qemuRefreshRTC(obj); - if (qemuProcessRefreshBalloonState(obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshBalloonState(obj, VIR_ASYNC_JOB_NONE) < 0) goto error; if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0) @@ -8258,7 +8258,7 @@ qemuProcessReconnect(void *opaque) if (jobStarted) { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactive(driver, obj); - qemuDomainObjEndJob(obj, &priv->job); + virDomainObjEndJob(obj, &priv->job); } else { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactiveJob(driver, obj); @@ -8291,7 +8291,7 @@ qemuProcessReconnect(void *opaque) * thread didn't have a chance to start playing with the domain yet * (it's all we can do anyway). */ - qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags); + qemuProcessStop(driver, obj, state, VIR_ASYNC_JOB_NONE, stopFlags); } goto cleanup; } @@ -8336,7 +8336,7 @@ qemuProcessReconnectHelper(virDomainObjPtr obj, * object. */ qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED, - QEMU_ASYNC_JOB_NONE, 0); + VIR_ASYNC_JOB_NONE, 0); qemuDomainRemoveInactiveJobLocked(src->driver, obj); virDomainObjEndAPI(&obj); diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index 448b65537a..fb3cd85bb2 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -32,11 +32,11 @@ int qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig, int qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainRunningReason reason, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuProcessStopCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainPausedReason reason, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuProcessBuildDestroyMemoryPaths(virQEMUDriverPtr driver, virDomainObjPtr vm, @@ -86,7 +86,7 @@ int qemuProcessStart(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, virCPUDefPtr updatedCPU, - qemuDomainAsyncJob 
asyncJob, + virDomainAsyncJob asyncJob, const char *migrateFrom, int stdin_fd, const char *stdin_path, @@ -105,7 +105,7 @@ virCommandPtr qemuProcessCreatePretendCmd(virQEMUDriverPtr driver, int qemuProcessInit(virQEMUDriverPtr driver, virDomainObjPtr vm, virCPUDefPtr updatedCPU, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, bool migration, unsigned int flags); @@ -122,7 +122,7 @@ int qemuProcessPrepareHost(virQEMUDriverPtr driver, int qemuProcessLaunch(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuProcessIncomingDefPtr incoming, virDomainMomentObjPtr snapshot, virNetDevVPortProfileOp vmop, @@ -130,13 +130,13 @@ int qemuProcessLaunch(virConnectPtr conn, int qemuProcessFinishStartup(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, bool startCPUs, virDomainPausedReason pausedReason); int qemuProcessRefreshState(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); typedef enum { VIR_QEMU_PROCESS_STOP_MIGRATED = 1 << 0, @@ -144,12 +144,12 @@ typedef enum { } qemuProcessStopFlags; int qemuProcessBeginStopJob(virDomainObjPtr vm, - qemuDomainJob job, + virDomainJob job, bool forceKill); void qemuProcessStop(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainShutoffReason reason, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, unsigned int flags); typedef enum { @@ -190,13 +190,13 @@ int qemuProcessSetupIOThread(virDomainObjPtr vm, int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuProcessRefreshBalloonState(virDomainObjPtr vm, int asyncJob); int qemuProcessRefreshDisks(virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int qemuProcessStartManagedPRDaemon(virDomainObjPtr vm) G_GNUC_NO_INLINE; diff --git 
a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c index 52468056ad..28d2349869 100644 --- a/src/qemu/qemu_saveimage.c +++ b/src/qemu/qemu_saveimage.c @@ -261,7 +261,7 @@ qemuSaveImageCreate(virQEMUDriverPtr driver, virQEMUSaveDataPtr data, virCommandPtr compressor, unsigned int flags, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); bool needUnlink = false; @@ -578,7 +578,7 @@ qemuSaveImageStartVM(virConnectPtr conn, virQEMUSaveDataPtr data, const char *path, bool start_paused, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; diff --git a/src/qemu/qemu_saveimage.h b/src/qemu/qemu_saveimage.h index f9fecbcc46..39c4ec128a 100644 --- a/src/qemu/qemu_saveimage.h +++ b/src/qemu/qemu_saveimage.h @@ -22,7 +22,7 @@ #include "datatypes.h" #include "qemu_conf.h" -#include "qemu_domainjob.h" +#include "virdomainjob.h" #include "qemu_domain.h" /* It would be nice to replace 'Qemud' with 'Qemu' but @@ -69,7 +69,7 @@ qemuSaveImageStartVM(virConnectPtr conn, virQEMUSaveDataPtr data, const char *path, bool start_paused, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6); int @@ -98,7 +98,7 @@ qemuSaveImageCreate(virQEMUDriverPtr driver, virQEMUSaveDataPtr data, virCommandPtr compressor, unsigned int flags, - qemuDomainAsyncJob asyncJob); + virDomainAsyncJob asyncJob); int virQEMUSaveDataWrite(virQEMUSaveDataPtr data, diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 8d216bbdbd..0692a4e7f3 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -284,7 +284,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver, * domain. Thus we stop and start CPUs ourselves. 
*/ if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE, - QEMU_ASYNC_JOB_SNAPSHOT) < 0) + VIR_ASYNC_JOB_SNAPSHOT) < 0) goto cleanup; resume = true; @@ -295,7 +295,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver, } } - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0) { + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_SNAPSHOT) < 0) { resume = false; goto cleanup; } @@ -313,7 +313,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT); qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT, - QEMU_ASYNC_JOB_SNAPSHOT, 0); + VIR_ASYNC_JOB_SNAPSHOT, 0); virDomainAuditStop(vm, "from-snapshot"); resume = false; } @@ -322,7 +322,7 @@ qemuSnapshotCreateActiveInternal(virQEMUDriverPtr driver, if (resume && virDomainObjIsActive(vm) && qemuProcessStartCPUs(driver, vm, VIR_DOMAIN_RUNNING_UNPAUSED, - QEMU_ASYNC_JOB_SNAPSHOT) < 0) { + VIR_ASYNC_JOB_SNAPSHOT) < 0) { event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR); @@ -793,7 +793,7 @@ qemuSnapshotDiskCleanup(qemuSnapshotDiskDataPtr data, size_t ndata, virQEMUDriverPtr driver, virDomainObjPtr vm, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { virErrorPtr orig_err; size_t i; @@ -884,7 +884,7 @@ qemuSnapshotDiskPrepareOneBlockdev(virDomainObjPtr vm, virQEMUDriverConfigPtr cfg, bool reuse, virHashTablePtr blockNamedNodeData, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virStorageSource) terminator = NULL; @@ -939,7 +939,7 @@ qemuSnapshotDiskPrepareOne(virQEMUDriverPtr driver, virHashTablePtr blockNamedNodeData, bool reuse, bool blockdev, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, virJSONValuePtr actions) { virDomainDiskDefPtr persistdisk; @@ -1051,7 +1051,7 @@ 
qemuSnapshotDiskPrepare(virQEMUDriverPtr driver, bool reuse, bool blockdev, virHashTablePtr blockNamedNodeData, - qemuDomainAsyncJob asyncJob, + virDomainAsyncJob asyncJob, qemuSnapshotDiskDataPtr *rdata, size_t *rndata, virJSONValuePtr actions) @@ -1156,7 +1156,7 @@ qemuSnapshotCreateDiskActive(virQEMUDriverPtr driver, virHashTablePtr blockNamedNodeData, unsigned int flags, virQEMUDriverConfigPtr cfg, - qemuDomainAsyncJob asyncJob) + virDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; g_autoptr(virJSONValue) actions = NULL; @@ -1248,16 +1248,16 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) { int freeze; - if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) + if (virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) { - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); goto cleanup; } freeze = qemuSnapshotFSFreeze(vm, NULL, 0); - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); if (freeze < 0) { /* the helper reported the error */ @@ -1281,7 +1281,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (memory && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_LIVE)) { if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SNAPSHOT, - QEMU_ASYNC_JOB_SNAPSHOT) < 0) + VIR_ASYNC_JOB_SNAPSHOT) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -1298,7 +1298,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, * migration step as qemu deactivates bitmaps after migration so the result * would be wrong */ if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && - !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_SNAPSHOT))) + !(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, VIR_ASYNC_JOB_SNAPSHOT))) goto cleanup; /* do the memory snapshot if necessary */ @@ -1309,12 +1309,12 @@ 
qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = VIR_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* allow the migration job to be cancelled or the domain to be paused */ - qemuDomainObjSetAsyncJobMask(&priv->job, (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MIGRATION_OP))); + virDomainObjSetAsyncJobMask(&priv->job, (VIR_JOB_DEFAULT_MASK | + JOB_MASK(VIR_JOB_SUSPEND) | + JOB_MASK(VIR_JOB_MIGRATION_OP))); if ((compressed = qemuSaveImageGetCompressionProgram(cfg->snapshotImageFormat, &compressor, @@ -1335,21 +1335,21 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if ((ret = qemuSaveImageCreate(driver, vm, snapdef->file, data, compressor, 0, - QEMU_ASYNC_JOB_SNAPSHOT)) < 0) + VIR_ASYNC_JOB_SNAPSHOT)) < 0) goto cleanup; /* the memory image was created, remove it on errors */ memory_unlink = true; /* forbid any further manipulation */ - qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_DEFAULT_MASK); + virDomainObjSetAsyncJobMask(&priv->job, VIR_JOB_DEFAULT_MASK); } /* the domain is now paused if a memory snapshot was requested */ if ((ret = qemuSnapshotCreateDiskActive(driver, vm, snap, blockNamedNodeData, flags, cfg, - QEMU_ASYNC_JOB_SNAPSHOT)) < 0) + VIR_ASYNC_JOB_SNAPSHOT)) < 0) goto cleanup; /* the snapshot is complete now */ @@ -1357,7 +1357,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT); qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT, - QEMU_ASYNC_JOB_SNAPSHOT, 0); + VIR_ASYNC_JOB_SNAPSHOT, 0); virDomainAuditStop(vm, "from-snapshot"); resume = false; thaw = 0; @@ -1379,7 +1379,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (resume && virDomainObjIsActive(vm) && qemuProcessStartCPUs(driver, vm, 
VIR_DOMAIN_RUNNING_UNPAUSED, - QEMU_ASYNC_JOB_SNAPSHOT) < 0) { + VIR_ASYNC_JOB_SNAPSHOT) < 0) { event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR); @@ -1393,7 +1393,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, } if (thaw != 0 && - qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) >= 0 && + virDomainObjBeginAgentJob(vm, &priv->job, VIR_AGENT_JOB_MODIFY) >= 0 && virDomainObjIsActive(vm)) { if (qemuSnapshotFSThaw(vm, ret == 0 && thaw > 0) < 0) { /* helper reported the error, if it was needed */ @@ -1401,7 +1401,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriverPtr driver, ret = -1; } - qemuDomainObjEndAgentJob(vm, &priv->job); + virDomainObjEndAgentJob(vm, &priv->job); } virQEMUSaveDataFree(data); @@ -1544,11 +1544,11 @@ qemuSnapshotCreateXML(virDomainPtr domain, * a regular job, so we need to set the job mask to disallow query as * 'savevm' blocks the monitor. External snapshot will then modify the * job mask appropriately. 
*/ - if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SNAPSHOT, - VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0) + if (virDomainObjBeginAsyncJob(vm, &priv->job, VIR_ASYNC_JOB_SNAPSHOT, + VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0) goto cleanup; - qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE); + virDomainObjSetAsyncJobMask(&priv->job, VIR_JOB_NONE); if (redefine) { if (virDomainSnapshotRedefinePrep(vm, &def, &snap, @@ -1679,7 +1679,7 @@ qemuSnapshotCreateXML(virDomainPtr domain, virDomainSnapshotObjListRemove(vm->snapshots, snap); } - qemuDomainObjEndAsyncJob(vm, &priv->job); + virDomainObjEndAsyncJob(vm, &priv->job); cleanup: return snapshot; @@ -1719,7 +1719,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, qemuDomainSaveCookiePtr cookie; virCPUDefPtr origCPU = NULL; unsigned int start_flags = VIR_QEMU_PROCESS_START_GEN_VMID; - qemuDomainAsyncJob jobType = QEMU_ASYNC_JOB_START; + virDomainAsyncJob jobType = VIR_ASYNC_JOB_START; bool defined = false; virCheckFlags(VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING | @@ -1891,7 +1891,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, virResetError(err); qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT, - QEMU_ASYNC_JOB_START, 0); + VIR_ASYNC_JOB_START, 0); virDomainAuditStop(vm, "from-snapshot"); detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT; event = virDomainEventLifecycleNewFromObj(vm, @@ -1900,7 +1900,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, virObjectEventStateQueue(driver->domainEventState, event); /* Start after stop won't be an async start job, so * reset to none */ - jobType = QEMU_ASYNC_JOB_NONE; + jobType = VIR_ASYNC_JOB_NONE; goto load; } } @@ -1909,7 +1909,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, /* Transitions 5, 6 */ if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_FROM_SNAPSHOT, - QEMU_ASYNC_JOB_START) < 0) + VIR_ASYNC_JOB_START) < 0) goto endjob; if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -1918,7 +1918,7 @@ qemuSnapshotRevert(virDomainObjPtr 
vm, } } - if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_START) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_START) < 0) goto endjob; rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name); if (qemuDomainObjExitMonitor(vm) < 0) @@ -2028,7 +2028,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, if (virDomainObjIsActive(vm)) { /* Transitions 4, 7 */ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT, - QEMU_ASYNC_JOB_START, 0); + VIR_ASYNC_JOB_START, 0); virDomainAuditStop(vm, "from-snapshot"); detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT; event = virDomainEventLifecycleNewFromObj(vm, @@ -2057,7 +2057,7 @@ qemuSnapshotRevert(virDomainObjPtr vm, virObjectEventStateQueue(driver->domainEventState, event); rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL, - QEMU_ASYNC_JOB_START, NULL, -1, NULL, NULL, + VIR_ASYNC_JOB_START, NULL, -1, NULL, NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, start_flags); virDomainAuditStart(vm, "from-snapshot", rc >= 0); @@ -2185,7 +2185,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY | VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) + if (virDomainObjBeginJob(vm, &priv->job, VIR_JOB_MODIFY) < 0) goto cleanup; if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot))) @@ -2258,7 +2258,7 @@ qemuSnapshotDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm, &priv->job); + virDomainObjEndJob(vm, &priv->job); cleanup: return ret; -- 2.25.1
participants (1)
-
Prathamesh Chavan