[GSoC][PATCH 0/7] Making `qemu_domainjob` hypervisor-agnostic

The following series of patches aims to make qemu_domainjob hypervisor-agnostic, paving the way for a future virdomainjob file that handles domain jobs. A small illustrative sketch of the callback-based layout the series works towards is included below, after the diffstat.

Prathamesh Chavan (7):
  qemu_domain: Added `qemuDomainJobInfo` to domainJob's `privateData`
  qemu_domainjob: added maxQueuedJobs and jobs_queued to `qemuDomainJob`
  qemu_domainjob: add `saveDomainStatus` as a callback function to jobs
  qemu_domain: function declarations moved to correct file
  qemu_domainjob: added `getDomainXMLOptionPtr` callback function
  qemu_domainjob: removed reference to `qemuDomainObjPrivatePtr`
  virmigration: `qemuMigrationJobPhase` transformed for more generic use

 src/hypervisor/meson.build       |    1 +
 src/hypervisor/virmigration.c    |   41 ++
 src/hypervisor/virmigration.h    |   38 +
 src/libvirt_private.syms         |    4 +
 src/qemu/MIGRATION.txt           |    8 +-
 src/qemu/qemu_backup.c           |   72 +-
 src/qemu/qemu_backup.h           |    3 +-
 src/qemu/qemu_block.c            |   45 +-
 src/qemu/qemu_block.h            |    6 +-
 src/qemu/qemu_blockjob.c         |   45 +-
 src/qemu/qemu_blockjob.h         |    3 +-
 src/qemu/qemu_checkpoint.c       |   29 +-
 src/qemu/qemu_domain.c           |  595 ++++++++++++++--
 src/qemu/qemu_domain.h           |  106 ++-
 src/qemu/qemu_domainjob.c        |  845 +++++----------------
 src/qemu/qemu_domainjob.h        |  152 ++--
 src/qemu/qemu_driver.c           | 1127 ++++++++++++++++--------------
 src/qemu/qemu_hotplug.c          |  319 ++++-----
 src/qemu/qemu_hotplug.h          |   30 +-
 src/qemu/qemu_migration.c        |  438 ++++++------
 src/qemu/qemu_migration.h        |   29 +-
 src/qemu/qemu_migration_cookie.c |   15 +-
 src/qemu/qemu_migration_params.c |   48 +-
 src/qemu/qemu_migration_params.h |   15 +-
 src/qemu/qemu_process.c          |  364 +++++-----
 src/qemu/qemu_process.h          |   15 +-
 tests/qemuhotplugtest.c          |    2 +-
 27 files changed, 2211 insertions(+), 2184 deletions(-)
 create mode 100644 src/hypervisor/virmigration.c
 create mode 100644 src/hypervisor/virmigration.h

-- 
2.25.1

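Before the individual patches, here is a small self-contained sketch of the pattern the series works towards (illustrative only; the names below are simplified stand-ins, not the actual libvirt symbols): a generic job object that keeps only hypervisor-agnostic state, with an opaque privateData pointer and a callback table for everything hypervisor-specific, such as the QEMU job progress data moved in patch 1.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified model (not libvirt code): the generic job
 * object knows nothing about QEMU; all hypervisor-specific data lives
 * behind an opaque pointer and is manipulated through callbacks. */

typedef struct _DomainJobObj DomainJobObj;

typedef struct {
    void *(*allocJobPrivate)(void);               /* allocate hypervisor data */
    void (*freeJobPrivate)(void *priv);           /* release it */
    void (*currentJobInfoInit)(DomainJobObj *job, /* start progress tracking */
                               unsigned long long now);
} DomainJobCallbacks;

struct _DomainJobObj {
    int asyncJob;                  /* generic, hypervisor-agnostic state */
    const DomainJobCallbacks *cb;  /* hypervisor-specific behaviour */
    void *privateData;             /* hypervisor-specific data (job info etc.) */
};

/* Toy "QEMU" implementation of the callbacks. */
typedef struct {
    unsigned long long started;
} QemuJobInfo;

static void *qemuAllocJobPrivate(void) { return calloc(1, sizeof(QemuJobInfo)); }
static void qemuFreeJobPrivate(void *p) { free(p); }
static void qemuCurrentJobInfoInit(DomainJobObj *job, unsigned long long now)
{
    QemuJobInfo *info = job->privateData;
    info->started = now;
}

static const DomainJobCallbacks qemuCallbacks = {
    qemuAllocJobPrivate, qemuFreeJobPrivate, qemuCurrentJobInfoInit,
};

int main(void)
{
    DomainJobObj job = { 0, &qemuCallbacks, NULL };

    /* Generic code drives the job; QEMU-specific behaviour stays behind cb. */
    job.privateData = job.cb->allocJobPrivate();
    job.cb->currentJobInfoInit(&job, 12345ULL);
    printf("job started at %llu\n",
           ((QemuJobInfo *)job.privateData)->started);
    job.cb->freeJobPrivate(job.privateData);
    return 0;
}

In the real code the table is qemuDomainObjPrivateJobCallbacks, which patch 1 extends with the setJobInfoOperation and currentJobInfoInit callbacks so that the generic job code no longer touches qemuDomainJobInfo directly.
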
As `qemuDomainJobInfo` had attributes specific to the QEMU hypervisor's jobs, we moved the attributes `current` and `completed` from `qemuDomainJobObj` into its `privateData` structure. In this process, two callback functions, `setJobInfoOperation` and `currentJobInfoInit`, were introduced to qemuDomainJob's callback structure.

Signed-off-by: Prathamesh Chavan <pc44800@gmail.com>
---
 src/qemu/qemu_backup.c           |  22 +-
 src/qemu/qemu_domain.c           | 498 +++++++++++++++++++++++++++++++
 src/qemu/qemu_domain.h           |  74 +++++
 src/qemu/qemu_domainjob.c        | 483 +-----------------------------
 src/qemu/qemu_domainjob.h        |  81 +----
 src/qemu/qemu_driver.c           |  49 +--
 src/qemu/qemu_migration.c        |  62 ++--
 src/qemu/qemu_migration_cookie.c |   8 +-
 src/qemu/qemu_process.c          |  32 +-
 9 files changed, 680 insertions(+), 629 deletions(-)

diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c
index a402730d38..1822c6f267 100644
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -529,20 +529,21 @@ qemuBackupJobTerminate(virDomainObjPtr vm,
 
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
     size_t i;
 
-    qemuDomainJobInfoUpdateTime(priv->job.current);
+    qemuDomainJobInfoUpdateTime(jobPriv->current);
 
-    g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
-    priv->job.completed = qemuDomainJobInfoCopy(priv->job.current);
+    g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree);
+    jobPriv->completed = qemuDomainJobInfoCopy(jobPriv->current);
 
-    priv->job.completed->stats.backup.total = priv->backup->push_total;
-    priv->job.completed->stats.backup.transferred = priv->backup->push_transferred;
-    priv->job.completed->stats.backup.tmp_used = priv->backup->pull_tmp_used;
-    priv->job.completed->stats.backup.tmp_total = priv->backup->pull_tmp_total;
+    jobPriv->completed->stats.backup.total = priv->backup->push_total;
+    jobPriv->completed->stats.backup.transferred = priv->backup->push_transferred;
+    jobPriv->completed->stats.backup.tmp_used = priv->backup->pull_tmp_used;
+    jobPriv->completed->stats.backup.tmp_total = priv->backup->pull_tmp_total;
 
-    priv->job.completed->status = jobstatus;
-    priv->job.completed->errmsg = g_strdup(priv->backup->errmsg);
+    jobPriv->completed->status = jobstatus;
+    jobPriv->completed->errmsg = g_strdup(priv->backup->errmsg);
 
     qemuDomainEventEmitJobCompleted(priv->driver, vm);
 
@@ -694,6 +695,7 @@ qemuBackupBegin(virDomainObjPtr vm,
                 unsigned int flags)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
     g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
     g_autoptr(virDomainBackupDef) def = NULL;
     g_autofree char *suffix = NULL;
@@ -745,7 +747,7 @@ qemuBackupBegin(virDomainObjPtr vm,
     qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
                                       JOB_MASK(QEMU_JOB_SUSPEND) |
                                       JOB_MASK(QEMU_JOB_MODIFY)));
-    priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
+    jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
 
     if (!virDomainObjIsActive(vm)) {
         virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c440c79e1d..1ae44ae39f 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -75,6 +75,457 @@ VIR_LOG_INIT("qemu.qemu_domain");
 
+static virDomainJobType
+qemuDomainJobStatusToType(qemuDomainJobStatus status)
+{
+    switch (status) {
+    case QEMU_DOMAIN_JOB_STATUS_NONE:
+        break;
+
+    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
+    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
+    case
QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + case QEMU_DOMAIN_JOB_STATUS_PAUSED: + return VIR_DOMAIN_JOB_UNBOUNDED; + + case QEMU_DOMAIN_JOB_STATUS_COMPLETED: + return VIR_DOMAIN_JOB_COMPLETED; + + case QEMU_DOMAIN_JOB_STATUS_FAILED: + return VIR_DOMAIN_JOB_FAILED; + + case QEMU_DOMAIN_JOB_STATUS_CANCELED: + return VIR_DOMAIN_JOB_CANCELLED; + } + + return VIR_DOMAIN_JOB_NONE; +} + +int +qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) +{ + unsigned long long now; + + if (!jobInfo->started) + return 0; + + if (virTimeMillisNow(&now) < 0) + return -1; + + if (now < jobInfo->started) { + VIR_WARN("Async job starts in the future"); + jobInfo->started = 0; + return 0; + } + + jobInfo->timeElapsed = now - jobInfo->started; + return 0; +} + +int +qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) +{ + unsigned long long now; + + if (!jobInfo->stopped) + return 0; + + if (virTimeMillisNow(&now) < 0) + return -1; + + if (now < jobInfo->stopped) { + VIR_WARN("Guest's CPUs stopped in the future"); + jobInfo->stopped = 0; + return 0; + } + + jobInfo->stats.mig.downtime = now - jobInfo->stopped; + jobInfo->stats.mig.downtime_set = true; + return 0; +} + + +int +qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, + virDomainJobInfoPtr info) +{ + info->type = qemuDomainJobStatusToType(jobInfo->status); + info->timeElapsed = jobInfo->timeElapsed; + + switch (jobInfo->statsType) { + case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: + info->memTotal = jobInfo->stats.mig.ram_total; + info->memRemaining = jobInfo->stats.mig.ram_remaining; + info->memProcessed = jobInfo->stats.mig.ram_transferred; + info->fileTotal = jobInfo->stats.mig.disk_total + + jobInfo->mirrorStats.total; + info->fileRemaining = jobInfo->stats.mig.disk_remaining + + (jobInfo->mirrorStats.total - + jobInfo->mirrorStats.transferred); + info->fileProcessed = jobInfo->stats.mig.disk_transferred + + jobInfo->mirrorStats.transferred; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + info->memTotal = jobInfo->stats.mig.ram_total; + info->memRemaining = jobInfo->stats.mig.ram_remaining; + info->memProcessed = jobInfo->stats.mig.ram_transferred; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + info->memTotal = jobInfo->stats.dump.total; + info->memProcessed = jobInfo->stats.dump.completed; + info->memRemaining = info->memTotal - info->memProcessed; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + info->fileTotal = jobInfo->stats.backup.total; + info->fileProcessed = jobInfo->stats.backup.transferred; + info->fileRemaining = info->fileTotal - info->fileProcessed; + break; + + case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + break; + } + + info->dataTotal = info->memTotal + info->fileTotal; + info->dataRemaining = info->memRemaining + info->fileRemaining; + info->dataProcessed = info->memProcessed + info->fileProcessed; + + return 0; +} + + +static int +qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; + qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats; + virTypedParameterPtr par = NULL; + int maxpar = 0; + int npar = 0; + unsigned long long mirrorRemaining = mirrorStats->total - + mirrorStats->transferred; + + if (virTypedParamsAddInt(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_OPERATION, + jobInfo->operation) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_TIME_ELAPSED, + jobInfo->timeElapsed) < 0) + 
goto error; + + if (jobInfo->timeDeltaSet && + jobInfo->timeElapsed > jobInfo->timeDelta && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_TIME_ELAPSED_NET, + jobInfo->timeElapsed - jobInfo->timeDelta) < 0) + goto error; + + if (stats->downtime_set && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DOWNTIME, + stats->downtime) < 0) + goto error; + + if (stats->downtime_set && + jobInfo->timeDeltaSet && + stats->downtime > jobInfo->timeDelta && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DOWNTIME_NET, + stats->downtime - jobInfo->timeDelta) < 0) + goto error; + + if (stats->setup_time_set && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_SETUP_TIME, + stats->setup_time) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DATA_TOTAL, + stats->ram_total + + stats->disk_total + + mirrorStats->total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DATA_PROCESSED, + stats->ram_transferred + + stats->disk_transferred + + mirrorStats->transferred) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DATA_REMAINING, + stats->ram_remaining + + stats->disk_remaining + + mirrorRemaining) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_TOTAL, + stats->ram_total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_PROCESSED, + stats->ram_transferred) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_REMAINING, + stats->ram_remaining) < 0) + goto error; + + if (stats->ram_bps && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_BPS, + stats->ram_bps) < 0) + goto error; + + if (stats->ram_duplicate_set) { + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_CONSTANT, + stats->ram_duplicate) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_NORMAL, + stats->ram_normal) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES, + stats->ram_normal_bytes) < 0) + goto error; + } + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, + stats->ram_dirty_rate) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_ITERATION, + stats->ram_iteration) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_POSTCOPY_REQS, + stats->ram_postcopy_reqs) < 0) + goto error; + + if (stats->ram_page_size > 0 && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_PAGE_SIZE, + stats->ram_page_size) < 0) + goto error; + + /* The remaining stats are disk, mirror, or migration specific + * so if this is a SAVEDUMP, we can just skip them */ + if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) + goto done; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_TOTAL, + stats->disk_total + + mirrorStats->total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_PROCESSED, + stats->disk_transferred + + mirrorStats->transferred) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_REMAINING, + stats->disk_remaining + + mirrorRemaining) < 0) + goto error; + + if (stats->disk_bps && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DISK_BPS, + stats->disk_bps) < 0) + goto error; + + if (stats->xbzrle_set) { + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + 
VIR_DOMAIN_JOB_COMPRESSION_CACHE, + stats->xbzrle_cache_size) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_BYTES, + stats->xbzrle_bytes) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_PAGES, + stats->xbzrle_pages) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES, + stats->xbzrle_cache_miss) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW, + stats->xbzrle_overflow) < 0) + goto error; + } + + if (stats->cpu_throttle_percentage && + virTypedParamsAddInt(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE, + stats->cpu_throttle_percentage) < 0) + goto error; + + done: + *type = qemuDomainJobStatusToType(jobInfo->status); + *params = par; + *nparams = npar; + return 0; + + error: + virTypedParamsFree(par, npar); + return -1; +} + + +static int +qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + qemuMonitorDumpStats *stats = &jobInfo->stats.dump; + virTypedParameterPtr par = NULL; + int maxpar = 0; + int npar = 0; + + if (virTypedParamsAddInt(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_OPERATION, + jobInfo->operation) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_TIME_ELAPSED, + jobInfo->timeElapsed) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_TOTAL, + stats->total) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_PROCESSED, + stats->completed) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_REMAINING, + stats->total - stats->completed) < 0) + goto error; + + *type = qemuDomainJobStatusToType(jobInfo->status); + *params = par; + *nparams = npar; + return 0; + + error: + virTypedParamsFree(par, npar); + return -1; +} + + +static int +qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + qemuDomainBackupStats *stats = &jobInfo->stats.backup; + g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); + + if (virTypedParamListAddInt(par, jobInfo->operation, + VIR_DOMAIN_JOB_OPERATION) < 0) + return -1; + + if (virTypedParamListAddULLong(par, jobInfo->timeElapsed, + VIR_DOMAIN_JOB_TIME_ELAPSED) < 0) + return -1; + + if (stats->transferred > 0 || stats->total > 0) { + if (virTypedParamListAddULLong(par, stats->total, + VIR_DOMAIN_JOB_DISK_TOTAL) < 0) + return -1; + + if (virTypedParamListAddULLong(par, stats->transferred, + VIR_DOMAIN_JOB_DISK_PROCESSED) < 0) + return -1; + + if (virTypedParamListAddULLong(par, stats->total - stats->transferred, + VIR_DOMAIN_JOB_DISK_REMAINING) < 0) + return -1; + } + + if (stats->tmp_used > 0 || stats->tmp_total > 0) { + if (virTypedParamListAddULLong(par, stats->tmp_used, + VIR_DOMAIN_JOB_DISK_TEMP_USED) < 0) + return -1; + + if (virTypedParamListAddULLong(par, stats->tmp_total, + VIR_DOMAIN_JOB_DISK_TEMP_TOTAL) < 0) + return -1; + } + + if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && + virTypedParamListAddBoolean(par, + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, + VIR_DOMAIN_JOB_SUCCESS) < 0) + return -1; + + if (jobInfo->errmsg && + virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) + return -1; + + *nparams = virTypedParamListStealParams(par, params); + *type = qemuDomainJobStatusToType(jobInfo->status); + return 0; +} + + +int 
+qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) +{ + switch (jobInfo->statsType) { + case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: + case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: + return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); + + case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: + return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams); + + case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: + return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); + + case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("invalid job statistics type")); + break; + + default: + virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); + break; + } + + return -1; +} + + +void +qemuDomainJobInfoFree(qemuDomainJobInfoPtr info) +{ + g_free(info->errmsg); + g_free(info); +} + + +qemuDomainJobInfoPtr +qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info) +{ + qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1); + + memcpy(ret, info, sizeof(*info)); + + ret->errmsg = g_strdup(info->errmsg); + + return ret; +} + + static void * qemuJobAllocPrivate(void) { @@ -91,6 +542,8 @@ qemuJobFreePrivate(void *opaque) return; qemuMigrationParamsFree(priv->migParams); + g_clear_pointer(&priv->current, qemuDomainJobInfoFree); + g_clear_pointer(&priv->completed, qemuDomainJobInfoFree); VIR_FREE(priv); } @@ -104,6 +557,7 @@ qemuJobResetPrivate(void *opaque) priv->spiceMigrated = false; priv->dumpCompleted = false; qemuMigrationParamsFree(priv->migParams); + g_clear_pointer(&priv->current, qemuDomainJobInfoFree); priv->migParams = NULL; } @@ -120,6 +574,48 @@ qemuDomainFormatJobPrivate(virBufferPtr buf, return 0; } +static void +qemuDomainCurrentJobInfoInit(qemuDomainJobObjPtr job, + unsigned long long now) +{ + qemuDomainJobPrivatePtr priv = job->privateData; + priv->current = g_new0(qemuDomainJobInfo, 1); + priv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->current->started = now; + +} + +static void +qemuDomainJobInfoSetOperation(qemuDomainJobObjPtr job, + virDomainJobOperation operation) +{ + qemuDomainJobPrivatePtr priv = job->privateData; + priv->current->operation = operation; +} + +void +qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, + virDomainObjPtr vm) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + virObjectEventPtr event; + virTypedParameterPtr params = NULL; + int nparams = 0; + int type; + + if (!jobPriv->completed) + return; + + if (qemuDomainJobInfoToParams(jobPriv->completed, &type, + ¶ms, &nparams) < 0) { + VIR_WARN("Could not get stats for completed job; domain %s", + vm->def->name); + } + + event = virDomainEventJobCompletedNewFromObj(vm, params, nparams); + virObjectEventStateQueue(driver->domainEventState, event); +} static int qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt, @@ -140,6 +636,8 @@ static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .resetJobPrivate = qemuJobResetPrivate, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, + .setJobInfoOperation = qemuDomainJobInfoSetOperation, + .currentJobInfoInit = qemuDomainCurrentJobInfoInit, }; /** diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index 3a1bcbbfa3..386ae17272 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -483,6 +483,52 @@ struct _qemuDomainXmlNsDef { char **capsdel; }; +typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats; 
+typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr; +struct _qemuDomainMirrorStats { + unsigned long long transferred; + unsigned long long total; +}; + +typedef struct _qemuDomainBackupStats qemuDomainBackupStats; +struct _qemuDomainBackupStats { + unsigned long long transferred; + unsigned long long total; + unsigned long long tmp_used; + unsigned long long tmp_total; +}; + +typedef struct _qemuDomainJobInfo qemuDomainJobInfo; +typedef qemuDomainJobInfo *qemuDomainJobInfoPtr; +struct _qemuDomainJobInfo { + qemuDomainJobStatus status; + virDomainJobOperation operation; + unsigned long long started; /* When the async job started */ + unsigned long long stopped; /* When the domain's CPUs were stopped */ + unsigned long long sent; /* When the source sent status info to the + destination (only for migrations). */ + unsigned long long received; /* When the destination host received status + info from the source (migrations only). */ + /* Computed values */ + unsigned long long timeElapsed; + long long timeDelta; /* delta = received - sent, i.e., the difference + between the source and the destination time plus + the time between the end of Perform phase on the + source and the beginning of Finish phase on the + destination. */ + bool timeDeltaSet; + /* Raw values from QEMU */ + qemuDomainJobStatsType statsType; + union { + qemuMonitorMigrationStats mig; + qemuMonitorDumpStats dump; + qemuDomainBackupStats backup; + } stats; + qemuDomainMirrorStats mirrorStats; + + char *errmsg; /* optional error message for failed completed jobs */ +}; + typedef struct _qemuDomainJobPrivate qemuDomainJobPrivate; typedef qemuDomainJobPrivate *qemuDomainJobPrivatePtr; struct _qemuDomainJobPrivate { @@ -491,8 +537,36 @@ struct _qemuDomainJobPrivate { bool spiceMigrated; /* spice migration completed */ bool dumpCompleted; /* dump completed */ qemuMigrationParamsPtr migParams; + qemuDomainJobInfoPtr current; /* async job progress data */ + qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */ }; + +void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, + virDomainObjPtr vm); + +void +qemuDomainJobInfoFree(qemuDomainJobInfoPtr info); + +G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree); + +qemuDomainJobInfoPtr +qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info); + +int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) + ATTRIBUTE_NONNULL(1); +int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) + ATTRIBUTE_NONNULL(1); +int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, + virDomainJobInfoPtr info) + ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); +int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + int *type, + virTypedParameterPtr *params, + int *nparams) + ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) + ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4); + int qemuDomainObjStartWorker(virDomainObjPtr dom); void qemuDomainObjStopWorker(virDomainObjPtr dom); diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 6393cc0b40..503a87bb12 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -115,51 +115,6 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, return -1; } - -void -qemuDomainJobInfoFree(qemuDomainJobInfoPtr info) -{ - g_free(info->errmsg); - g_free(info); -} - - -qemuDomainJobInfoPtr -qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info) -{ - qemuDomainJobInfoPtr ret = g_new0(qemuDomainJobInfo, 1); - - memcpy(ret, info, sizeof(*info)); - - ret->errmsg = g_strdup(info->errmsg); - - 
return ret; -} - -void -qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, - virDomainObjPtr vm) -{ - qemuDomainObjPrivatePtr priv = vm->privateData; - virObjectEventPtr event; - virTypedParameterPtr params = NULL; - int nparams = 0; - int type; - - if (!priv->job.completed) - return; - - if (qemuDomainJobInfoToParams(priv->job.completed, &type, - ¶ms, &nparams) < 0) { - VIR_WARN("Could not get stats for completed job; domain %s", - vm->def->name); - } - - event = virDomainEventJobCompletedNewFromObj(vm, params, nparams); - virObjectEventStateQueue(driver->domainEventState, event); -} - - int qemuDomainObjInitJob(qemuDomainJobObjPtr job, qemuDomainObjPrivateJobCallbacksPtr cb) @@ -216,7 +171,6 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) job->mask = QEMU_JOB_DEFAULT_MASK; job->abortJob = false; VIR_FREE(job->error); - g_clear_pointer(&job->current, qemuDomainJobInfoFree); job->cb->resetJobPrivate(job->privateData); job->apiFlags = 0; } @@ -251,8 +205,6 @@ qemuDomainObjFreeJob(qemuDomainJobObjPtr job) qemuDomainObjResetJob(job); qemuDomainObjResetAsyncJob(job); job->cb->freeJobPrivate(job->privateData); - g_clear_pointer(&job->current, qemuDomainJobInfoFree); - g_clear_pointer(&job->completed, qemuDomainJobInfoFree); virCondDestroy(&job->cond); virCondDestroy(&job->asyncCond); } @@ -264,435 +216,6 @@ qemuDomainTrackJob(qemuDomainJob job) } -int -qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) -{ - unsigned long long now; - - if (!jobInfo->started) - return 0; - - if (virTimeMillisNow(&now) < 0) - return -1; - - if (now < jobInfo->started) { - VIR_WARN("Async job starts in the future"); - jobInfo->started = 0; - return 0; - } - - jobInfo->timeElapsed = now - jobInfo->started; - return 0; -} - -int -qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) -{ - unsigned long long now; - - if (!jobInfo->stopped) - return 0; - - if (virTimeMillisNow(&now) < 0) - return -1; - - if (now < jobInfo->stopped) { - VIR_WARN("Guest's CPUs stopped in the future"); - jobInfo->stopped = 0; - return 0; - } - - jobInfo->stats.mig.downtime = now - jobInfo->stopped; - jobInfo->stats.mig.downtime_set = true; - return 0; -} - -static virDomainJobType -qemuDomainJobStatusToType(qemuDomainJobStatus status) -{ - switch (status) { - case QEMU_DOMAIN_JOB_STATUS_NONE: - break; - - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: - return VIR_DOMAIN_JOB_UNBOUNDED; - - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - return VIR_DOMAIN_JOB_COMPLETED; - - case QEMU_DOMAIN_JOB_STATUS_FAILED: - return VIR_DOMAIN_JOB_FAILED; - - case QEMU_DOMAIN_JOB_STATUS_CANCELED: - return VIR_DOMAIN_JOB_CANCELLED; - } - - return VIR_DOMAIN_JOB_NONE; -} - -int -qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, - virDomainJobInfoPtr info) -{ - info->type = qemuDomainJobStatusToType(jobInfo->status); - info->timeElapsed = jobInfo->timeElapsed; - - switch (jobInfo->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; - info->fileTotal = jobInfo->stats.mig.disk_total + - jobInfo->mirrorStats.total; - info->fileRemaining = jobInfo->stats.mig.disk_remaining + - (jobInfo->mirrorStats.total - - jobInfo->mirrorStats.transferred); - info->fileProcessed = jobInfo->stats.mig.disk_transferred + - 
jobInfo->mirrorStats.transferred; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - info->memTotal = jobInfo->stats.dump.total; - info->memProcessed = jobInfo->stats.dump.completed; - info->memRemaining = info->memTotal - info->memProcessed; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - info->fileTotal = jobInfo->stats.backup.total; - info->fileProcessed = jobInfo->stats.backup.transferred; - info->fileRemaining = info->fileTotal - info->fileProcessed; - break; - - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: - break; - } - - info->dataTotal = info->memTotal + info->fileTotal; - info->dataRemaining = info->memRemaining + info->fileRemaining; - info->dataProcessed = info->memProcessed + info->fileProcessed; - - return 0; -} - - -static int -qemuDomainMigrationJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; - qemuDomainMirrorStatsPtr mirrorStats = &jobInfo->mirrorStats; - virTypedParameterPtr par = NULL; - int maxpar = 0; - int npar = 0; - unsigned long long mirrorRemaining = mirrorStats->total - - mirrorStats->transferred; - - if (virTypedParamsAddInt(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_OPERATION, - jobInfo->operation) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) - goto error; - - if (jobInfo->timeDeltaSet && - jobInfo->timeElapsed > jobInfo->timeDelta && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_TIME_ELAPSED_NET, - jobInfo->timeElapsed - jobInfo->timeDelta) < 0) - goto error; - - if (stats->downtime_set && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DOWNTIME, - stats->downtime) < 0) - goto error; - - if (stats->downtime_set && - jobInfo->timeDeltaSet && - stats->downtime > jobInfo->timeDelta && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DOWNTIME_NET, - stats->downtime - jobInfo->timeDelta) < 0) - goto error; - - if (stats->setup_time_set && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_SETUP_TIME, - stats->setup_time) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DATA_TOTAL, - stats->ram_total + - stats->disk_total + - mirrorStats->total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DATA_PROCESSED, - stats->ram_transferred + - stats->disk_transferred + - mirrorStats->transferred) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DATA_REMAINING, - stats->ram_remaining + - stats->disk_remaining + - mirrorRemaining) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_TOTAL, - stats->ram_total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_PROCESSED, - stats->ram_transferred) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_REMAINING, - stats->ram_remaining) < 0) - goto error; - - if (stats->ram_bps && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_BPS, - stats->ram_bps) < 0) - goto error; - - if (stats->ram_duplicate_set) { - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_CONSTANT, - stats->ram_duplicate) < 0 || - 
virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_NORMAL, - stats->ram_normal) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES, - stats->ram_normal_bytes) < 0) - goto error; - } - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, - stats->ram_dirty_rate) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_ITERATION, - stats->ram_iteration) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_POSTCOPY_REQS, - stats->ram_postcopy_reqs) < 0) - goto error; - - if (stats->ram_page_size > 0 && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_PAGE_SIZE, - stats->ram_page_size) < 0) - goto error; - - /* The remaining stats are disk, mirror, or migration specific - * so if this is a SAVEDUMP, we can just skip them */ - if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) - goto done; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_TOTAL, - stats->disk_total + - mirrorStats->total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_PROCESSED, - stats->disk_transferred + - mirrorStats->transferred) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_REMAINING, - stats->disk_remaining + - mirrorRemaining) < 0) - goto error; - - if (stats->disk_bps && - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_DISK_BPS, - stats->disk_bps) < 0) - goto error; - - if (stats->xbzrle_set) { - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_CACHE, - stats->xbzrle_cache_size) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_BYTES, - stats->xbzrle_bytes) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_PAGES, - stats->xbzrle_pages) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES, - stats->xbzrle_cache_miss) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW, - stats->xbzrle_overflow) < 0) - goto error; - } - - if (stats->cpu_throttle_percentage && - virTypedParamsAddInt(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE, - stats->cpu_throttle_percentage) < 0) - goto error; - - done: - *type = qemuDomainJobStatusToType(jobInfo->status); - *params = par; - *nparams = npar; - return 0; - - error: - virTypedParamsFree(par, npar); - return -1; -} - - -static int -qemuDomainDumpJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - qemuMonitorDumpStats *stats = &jobInfo->stats.dump; - virTypedParameterPtr par = NULL; - int maxpar = 0; - int npar = 0; - - if (virTypedParamsAddInt(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_OPERATION, - jobInfo->operation) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) - goto error; - - if (virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_TOTAL, - stats->total) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_PROCESSED, - stats->completed) < 0 || - virTypedParamsAddULLong(&par, &npar, &maxpar, - VIR_DOMAIN_JOB_MEMORY_REMAINING, - stats->total - stats->completed) < 0) - goto error; - - *type = qemuDomainJobStatusToType(jobInfo->status); - *params = par; - *nparams = npar; - return 0; - - error: - virTypedParamsFree(par, npar); - 
return -1; -} - - -static int -qemuDomainBackupJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - qemuDomainBackupStats *stats = &jobInfo->stats.backup; - g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); - - if (virTypedParamListAddInt(par, jobInfo->operation, - VIR_DOMAIN_JOB_OPERATION) < 0) - return -1; - - if (virTypedParamListAddULLong(par, jobInfo->timeElapsed, - VIR_DOMAIN_JOB_TIME_ELAPSED) < 0) - return -1; - - if (stats->transferred > 0 || stats->total > 0) { - if (virTypedParamListAddULLong(par, stats->total, - VIR_DOMAIN_JOB_DISK_TOTAL) < 0) - return -1; - - if (virTypedParamListAddULLong(par, stats->transferred, - VIR_DOMAIN_JOB_DISK_PROCESSED) < 0) - return -1; - - if (virTypedParamListAddULLong(par, stats->total - stats->transferred, - VIR_DOMAIN_JOB_DISK_REMAINING) < 0) - return -1; - } - - if (stats->tmp_used > 0 || stats->tmp_total > 0) { - if (virTypedParamListAddULLong(par, stats->tmp_used, - VIR_DOMAIN_JOB_DISK_TEMP_USED) < 0) - return -1; - - if (virTypedParamListAddULLong(par, stats->tmp_total, - VIR_DOMAIN_JOB_DISK_TEMP_TOTAL) < 0) - return -1; - } - - if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && - virTypedParamListAddBoolean(par, - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, - VIR_DOMAIN_JOB_SUCCESS) < 0) - return -1; - - if (jobInfo->errmsg && - virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) - return -1; - - *nparams = virTypedParamListStealParams(par, params); - *type = qemuDomainJobStatusToType(jobInfo->status); - return 0; -} - - -int -qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) -{ - switch (jobInfo->statsType) { - case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); - - case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams); - - case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); - - case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("invalid job statistics type")); - break; - - default: - virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); - break; - } - - return -1; -} - - void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, virDomainObjPtr obj, @@ -894,13 +417,11 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - priv->job.current = g_new0(qemuDomainJobInfo, 1); - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->job.cb->currentJobInfoInit(&priv->job, now); priv->job.asyncJob = asyncJob; priv->job.asyncOwner = virThreadSelfID(); priv->job.asyncOwnerAPI = virThreadJobGet(); priv->job.asyncStarted = now; - priv->job.current->started = now; } } @@ -1066,7 +587,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, return -1; priv = obj->privateData; - priv->job.current->operation = operation; + priv->job.cb->setJobInfoOperation(&priv->job, operation); priv->job.apiFlags = apiFlags; return 0; } diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index c83e055647..88051d099a 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -19,7 +19,6 @@ #pragma once #include <glib-object.h> -#include "qemu_monitor.h" #define 
JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1)) #define QEMU_JOB_DEFAULT_MASK \ @@ -99,61 +98,6 @@ typedef enum { QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP, } qemuDomainJobStatsType; - -typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats; -typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr; -struct _qemuDomainMirrorStats { - unsigned long long transferred; - unsigned long long total; -}; - -typedef struct _qemuDomainBackupStats qemuDomainBackupStats; -struct _qemuDomainBackupStats { - unsigned long long transferred; - unsigned long long total; - unsigned long long tmp_used; - unsigned long long tmp_total; -}; - -typedef struct _qemuDomainJobInfo qemuDomainJobInfo; -typedef qemuDomainJobInfo *qemuDomainJobInfoPtr; -struct _qemuDomainJobInfo { - qemuDomainJobStatus status; - virDomainJobOperation operation; - unsigned long long started; /* When the async job started */ - unsigned long long stopped; /* When the domain's CPUs were stopped */ - unsigned long long sent; /* When the source sent status info to the - destination (only for migrations). */ - unsigned long long received; /* When the destination host received status - info from the source (migrations only). */ - /* Computed values */ - unsigned long long timeElapsed; - long long timeDelta; /* delta = received - sent, i.e., the difference - between the source and the destination time plus - the time between the end of Perform phase on the - source and the beginning of Finish phase on the - destination. */ - bool timeDeltaSet; - /* Raw values from QEMU */ - qemuDomainJobStatsType statsType; - union { - qemuMonitorMigrationStats mig; - qemuMonitorDumpStats dump; - qemuDomainBackupStats backup; - } stats; - qemuDomainMirrorStats mirrorStats; - - char *errmsg; /* optional error message for failed completed jobs */ -}; - -void -qemuDomainJobInfoFree(qemuDomainJobInfoPtr info); - -G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree); - -qemuDomainJobInfoPtr -qemuDomainJobInfoCopy(qemuDomainJobInfoPtr info); - typedef struct _qemuDomainJobObj qemuDomainJobObj; typedef qemuDomainJobObj *qemuDomainJobObjPtr; @@ -164,6 +108,10 @@ typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, qemuDomainJobObjPtr); typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr, qemuDomainJobObjPtr); +typedef void (*qemuDomainObjJobInfoSetOperation)(qemuDomainJobObjPtr, + virDomainJobOperation); +typedef void (*qemuDomainObjCurrentJobInfoInit)(qemuDomainJobObjPtr, + unsigned long long); typedef struct _qemuDomainObjPrivateJobCallbacks qemuDomainObjPrivateJobCallbacks; typedef qemuDomainObjPrivateJobCallbacks *qemuDomainObjPrivateJobCallbacksPtr; @@ -173,6 +121,8 @@ struct _qemuDomainObjPrivateJobCallbacks { qemuDomainObjPrivateJobReset resetJobPrivate; qemuDomainObjPrivateJobFormat formatJob; qemuDomainObjPrivateJobParse parseJob; + qemuDomainObjJobInfoSetOperation setJobInfoOperation; + qemuDomainObjCurrentJobInfoInit currentJobInfoInit; }; struct _qemuDomainJobObj { @@ -198,8 +148,6 @@ struct _qemuDomainJobObj { unsigned long long asyncStarted; /* When the current async job started */ int phase; /* Job phase (mainly for migrations) */ unsigned long long mask; /* Jobs allowed during async job */ - qemuDomainJobInfoPtr current; /* async job progress data */ - qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */ bool abortJob; /* abort of the job requested */ char *error; /* job event completion error */ unsigned long apiFlags; /* flags passed to the API which started the async job */ @@ -213,9 +161,6 @@ 
const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, const char *phase); -void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, - virDomainObjPtr vm); - int qemuDomainObjBeginJob(virQEMUDriverPtr driver, virDomainObjPtr obj, qemuDomainJob job) @@ -262,20 +207,6 @@ void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, void qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, virDomainObjPtr vm); -int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo) - ATTRIBUTE_NONNULL(1); -int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) - ATTRIBUTE_NONNULL(1); -int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, - virDomainJobInfoPtr info) - ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); -int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, - int *type, - virTypedParameterPtr *params, - int *nparams) - ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) - ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4); - bool qemuDomainTrackJob(qemuDomainJob job); void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0f98243fe4..ad3b657268 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2724,6 +2724,7 @@ qemuDomainGetControlInfo(virDomainPtr dom, { virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; int ret = -1; virCheckFlags(0, -1); @@ -2738,6 +2739,7 @@ qemuDomainGetControlInfo(virDomainPtr dom, goto cleanup; priv = vm->privateData; + jobPriv = priv->job.privateData; memset(info, 0, sizeof(*info)); @@ -2747,9 +2749,9 @@ qemuDomainGetControlInfo(virDomainPtr dom, } else if (priv->job.active) { if (virTimeMillisNow(&info->stateTime) < 0) goto cleanup; - if (priv->job.current) { + if (jobPriv->current) { info->state = VIR_DOMAIN_CONTROL_JOB; - info->stateTime -= priv->job.current->started; + info->stateTime -= jobPriv->current->started; } else { if (priv->monStart > 0) { info->state = VIR_DOMAIN_CONTROL_OCCUPIED; @@ -3314,6 +3316,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, int ret = -1; virObjectEventPtr event = NULL; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virQEMUSaveDataPtr data = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL; @@ -3330,7 +3333,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, goto endjob; } - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Pause */ if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { @@ -3715,7 +3718,7 @@ qemuDumpWaitForCompletion(virDomainObjPtr vm) return -1; } - if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { + if (jobPriv->current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { if (priv->job.error) virReportError(VIR_ERR_OPERATION_FAILED, _("memory-only dump failed: %s"), @@ -3726,7 +3729,7 @@ qemuDumpWaitForCompletion(virDomainObjPtr vm) return -1; } - qemuDomainJobInfoUpdateTime(priv->job.current); + qemuDomainJobInfoUpdateTime(jobPriv->current); return 0; } @@ -3740,6 +3743,7 @@ qemuDumpToFd(virQEMUDriverPtr driver, const char *dumpformat) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; bool detach = false; int ret = -1; @@ -3755,9 +3759,9 @@ qemuDumpToFd(virQEMUDriverPtr driver, return -1; if (detach) - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; + 
jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; else - g_clear_pointer(&priv->job.current, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->current, qemuDomainJobInfoFree); if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) return -1; @@ -3894,6 +3898,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv = NULL; + qemuDomainJobPrivatePtr jobPriv; bool resume = false, paused = false; int ret = -1; virObjectEventPtr event = NULL; @@ -3918,7 +3923,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, goto endjob; priv = vm->privateData; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv = priv->job.privateData; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Migrate will always stop the VM, so the resume condition is independent of whether the stop command is issued. */ @@ -7480,6 +7486,7 @@ qemuDomainObjStart(virConnectPtr conn, bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0; unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; start_flags |= start_paused ? VIR_QEMU_PROCESS_START_PAUSED : 0; start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0; @@ -7503,8 +7510,8 @@ qemuDomainObjStart(virConnectPtr conn, } vm->hasManagedSave = false; } else { - virDomainJobOperation op = priv->job.current->operation; - priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE; + virDomainJobOperation op = jobPriv->current->operation; + jobPriv->current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE; ret = qemuDomainObjRestore(conn, driver, vm, managed_save, start_paused, bypass_cache, asyncJob); @@ -7522,7 +7529,7 @@ qemuDomainObjStart(virConnectPtr conn, return ret; } else { VIR_WARN("Ignoring incomplete managed state %s", managed_save); - priv->job.current->operation = op; + jobPriv->current->operation = op; vm->hasManagedSave = false; } } @@ -13576,13 +13583,14 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, qemuDomainJobInfoPtr *jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; int ret = -1; *jobInfo = NULL; if (completed) { - if (priv->job.completed && !priv->job.current) - *jobInfo = qemuDomainJobInfoCopy(priv->job.completed); + if (jobPriv->completed && !jobPriv->current) + *jobInfo = qemuDomainJobInfoCopy(jobPriv->completed); return 0; } @@ -13600,11 +13608,11 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, if (virDomainObjCheckActive(vm) < 0) goto cleanup; - if (!priv->job.current) { + if (!jobPriv->current) { ret = 0; goto cleanup; } - *jobInfo = qemuDomainJobInfoCopy(priv->job.current); + *jobInfo = qemuDomainJobInfoCopy(jobPriv->current); switch ((*jobInfo)->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: @@ -13679,6 +13687,7 @@ qemuDomainGetJobStats(virDomainPtr dom, virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; g_autoptr(qemuDomainJobInfo) jobInfo = NULL; bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED); int ret = -1; @@ -13693,6 +13702,7 @@ qemuDomainGetJobStats(virDomainPtr dom, goto cleanup; priv = vm->privateData; + jobPriv = priv->job.privateData; if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) goto cleanup; @@ -13708,7 +13718,7 @@ 
qemuDomainGetJobStats(virDomainPtr dom, ret = qemuDomainJobInfoToParams(jobInfo, type, params, nparams); if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED)) - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); cleanup: virDomainObjEndAPI(&vm); @@ -13740,6 +13750,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; int reason; if (!(vm = qemuDomainObjFromDomain(dom))) @@ -13755,6 +13766,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) goto endjob; priv = vm->privateData; + jobPriv = priv->job.privateData; switch (priv->job.asyncJob) { case QEMU_ASYNC_JOB_NONE: @@ -13775,7 +13787,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) break; case QEMU_ASYNC_JOB_MIGRATION_OUT: - if ((priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || + if ((jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY))) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", @@ -15443,6 +15455,7 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, bool resume = false; int ret = -1; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; g_autofree char *xml = NULL; virDomainSnapshotDefPtr snapdef = virDomainSnapshotObjGetDef(snap); bool memory = snapdef->memory == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL; @@ -15520,7 +15533,7 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* allow the migration job to be cancelled or the domain to be paused */ qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 0f2f92b211..c517774c9f 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1008,6 +1008,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; int port; size_t i; unsigned long long mirror_speed = speed; @@ -1052,7 +1053,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, return -1; if (priv->job.abortJob) { - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); @@ -1070,7 +1071,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, } qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - priv->job.current); + jobPriv->current); /* Okay, all disks are ready. 
Modify migrate_flags */ *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK | @@ -1550,7 +1551,8 @@ qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPtr jobInfo = jobPriv->current; char *error = NULL; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int ret = -1; @@ -1620,7 +1622,8 @@ qemuMigrationAnyCompleted(virQEMUDriverPtr driver, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPtr jobInfo = jobPriv->current; int pauseReason; if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0) @@ -1711,7 +1714,8 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobInfoPtr jobInfo = priv->job.current; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobInfoPtr jobInfo = jobPriv->current; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int rv; @@ -1743,9 +1747,9 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobInfoUpdateDowntime(jobInfo); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); - priv->job.completed = qemuDomainJobInfoCopy(jobInfo); - priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); + jobPriv->completed = qemuDomainJobInfoCopy(jobInfo); + jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) @@ -3018,16 +3022,16 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, return -1; if (retcode == 0) - jobInfo = priv->job.completed; + jobInfo = jobPriv->completed; else - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); /* Update times with the values sent by the destination daemon */ if (mig->jobInfo && jobInfo) { int reason; /* We need to refresh migration statistics after a completed post-copy - * migration since priv->job.completed contains obsolete data from the + * migration since jobPriv->completed contains obsolete data from the * time we switched to post-copy mode. 
*/ if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && @@ -3479,6 +3483,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, int ret = -1; unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; g_autoptr(qemuMigrationCookie) mig = NULL; g_autofree char *tlsAlias = NULL; qemuMigrationIOThreadPtr iothread = NULL; @@ -3636,7 +3641,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, /* explicitly do this *after* we entered the monitor, * as this is a critical section so we are guaranteed * priv->job.abortJob will not change */ - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); @@ -3741,7 +3746,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, * resume it now once we finished all block jobs and wait for the real * end of the migration. */ - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { + if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { if (qemuMigrationSrcContinue(driver, vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) @@ -3769,11 +3774,11 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, goto error; } - if (priv->job.completed) { - priv->job.completed->stopped = priv->job.current->stopped; - qemuDomainJobInfoUpdateTime(priv->job.completed); - qemuDomainJobInfoUpdateDowntime(priv->job.completed); - ignore_value(virTimeMillisNow(&priv->job.completed->sent)); + if (jobPriv->completed) { + jobPriv->completed->stopped = jobPriv->current->stopped; + qemuDomainJobInfoUpdateTime(jobPriv->completed); + qemuDomainJobInfoUpdateDowntime(jobPriv->completed); + ignore_value(virTimeMillisNow(&jobPriv->completed->sent)); } cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK | @@ -3801,7 +3806,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (virDomainObjIsActive(vm)) { if (cancel && - priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && + jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuMonitorMigrateCancel(priv->mon); @@ -3814,8 +3819,8 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn); - if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + if (jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; } if (iothread) @@ -5023,7 +5028,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, : QEMU_MIGRATION_PHASE_FINISH2); qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK | QEMU_MIGRATION_COOKIE_STATS | @@ -5115,7 +5120,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, goto endjob; } - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) inPostCopy = true; if (!(flags & VIR_MIGRATE_PAUSED)) { @@ -5229,9 +5234,9 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, if (dom) { if (jobInfo) { - priv->job.completed = g_steal_pointer(&jobInfo); - priv->job.completed->status = 
QEMU_DOMAIN_JOB_STATUS_COMPLETED; - priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + jobPriv->completed = g_steal_pointer(&jobInfo); + jobPriv->completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobPriv->completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; } if (qemuMigrationBakeCookie(mig, driver, vm, @@ -5244,7 +5249,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, * is obsolete anyway. */ if (inPostCopy) - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); } qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, @@ -5473,6 +5478,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, unsigned long apiFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virDomainJobOperation op; unsigned long long mask; @@ -5489,7 +5495,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) return -1; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; qemuDomainObjSetAsyncJobMask(vm, mask); return 0; diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index 81b557e0a8..a0e8cba8ba 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -509,12 +509,13 @@ qemuMigrationCookieAddStatistics(qemuMigrationCookiePtr mig, virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (!priv->job.completed) + if (!jobPriv->completed) return 0; g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree); - mig->jobInfo = qemuDomainJobInfoCopy(priv->job.completed); + mig->jobInfo = qemuDomainJobInfoCopy(jobPriv->completed); mig->flags |= QEMU_MIGRATION_COOKIE_STATS; @@ -1465,6 +1466,7 @@ qemuMigrationEatCookie(virQEMUDriverPtr driver, unsigned int flags) { g_autoptr(qemuMigrationCookie) mig = NULL; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; /* Parse & validate incoming cookie (if any) */ if (cookiein && cookieinlen && @@ -1513,7 +1515,7 @@ qemuMigrationEatCookie(virQEMUDriverPtr driver, } if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo) - mig->jobInfo->operation = priv->job.current->operation; + mig->jobInfo->operation = jobPriv->current->operation; return g_steal_pointer(&mig); } diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 126fabf5ef..652d217b5c 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -657,6 +657,7 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainEventSuspendedDetailType detail; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virObjectLock(vm); @@ -668,7 +669,7 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING && !priv->pausedShutdown) { if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) { - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) reason = VIR_DOMAIN_PAUSED_POSTCOPY; else reason = VIR_DOMAIN_PAUSED_MIGRATION; @@ -680,8 +681,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon G_GNUC_UNUSED, vm->def->name, virDomainPausedReasonTypeToString(reason), detail); - if (priv->job.current) - 
ignore_value(virTimeMillisNow(&priv->job.current->stopped)); + if (jobPriv->current) + ignore_value(virTimeMillisNow(&jobPriv->current->stopped)); if (priv->signalStop) virDomainObjBroadcast(vm); @@ -1649,6 +1650,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, void *opaque) { qemuDomainObjPrivatePtr priv; + qemuDomainJobPrivatePtr jobPriv; virQEMUDriverPtr driver = opaque; virObjectEventPtr event = NULL; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); @@ -1661,12 +1663,13 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon G_GNUC_UNUSED, qemuMonitorMigrationStatusTypeToString(status)); priv = vm->privateData; + jobPriv = priv->job.privateData; if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { VIR_DEBUG("got MIGRATION event without a migration job"); goto cleanup; } - priv->job.current->stats.mig.status = status; + jobPriv->current->stats.mig.status = status; virDomainObjBroadcast(vm); if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY && @@ -1747,13 +1750,13 @@ qemuProcessHandleDumpCompleted(qemuMonitorPtr mon G_GNUC_UNUSED, goto cleanup; } jobPriv->dumpCompleted = true; - priv->job.current->stats.dump = *stats; + jobPriv->current->stats.dump = *stats; priv->job.error = g_strdup(error); /* Force error if extracting the DUMP_COMPLETED status failed */ if (!error && status < 0) { priv->job.error = g_strdup(virGetLastErrorMessage()); - priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; + jobPriv->current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; } virDomainObjBroadcast(vm); @@ -3267,6 +3270,7 @@ int qemuProcessStopCPUs(virQEMUDriverPtr driver, { int ret = -1; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; VIR_FREE(priv->lockState); @@ -3285,8 +3289,8 @@ int qemuProcessStopCPUs(virQEMUDriverPtr driver, /* de-activate netdevs after stopping CPUs */ ignore_value(qemuInterfaceStopDevices(vm->def)); - if (priv->job.current) - ignore_value(virTimeMillisNow(&priv->job.current->stopped)); + if (jobPriv->current) + ignore_value(virTimeMillisNow(&jobPriv->current->stopped)); /* The STOP event handler will change the domain state with the reason * saved in priv->pausedReason and it will also emit corresponding domain @@ -3583,6 +3587,7 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, unsigned int *stopFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; virDomainState state; int reason; unsigned long long now; @@ -3651,11 +3656,11 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, /* We reset the job parameters for backup so that the job will look * active. 
This is possible because we are able to recover the state * of blockjobs and also the backup job allows all sub-job types */ - priv->job.current = g_new0(qemuDomainJobInfo, 1); - priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; - priv->job.current->started = now; + jobPriv->current = g_new0(qemuDomainJobInfo, 1); + jobPriv->current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; + jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + jobPriv->current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobPriv->current->started = now; break; case QEMU_ASYNC_JOB_NONE: @@ -3760,7 +3765,6 @@ qemuDomainPerfRestart(virDomainObjPtr vm) return 0; } - static void qemuProcessReconnectCheckMemAliasOrderMismatch(virDomainObjPtr vm) { -- 2.25.1

On Tue, Aug 04, 2020 at 08:06:43PM +0530, Prathamesh Chavan wrote:
As `qemuDomainJobInfo` had attributes specific to qemu hypervisor's jobs, we moved the attribute `current` and `completed` from `qemuDomainJobObj` to its `privateData` structure.
In this process, two callback functions: `setJobInfoOperation` and `currentJobInfoInit` were introduced to qemuDomainJob's callback structure.
Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_backup.c | 22 +- src/qemu/qemu_domain.c | 498 +++++++++++++++++++++++++++++++ src/qemu/qemu_domain.h | 74 +++++ src/qemu/qemu_domainjob.c | 483 +----------------------------- src/qemu/qemu_domainjob.h | 81 +---- src/qemu/qemu_driver.c | 49 +-- src/qemu/qemu_migration.c | 62 ++-- src/qemu/qemu_migration_cookie.c | 8 +- src/qemu/qemu_process.c | 32 +- 9 files changed, 680 insertions(+), 629 deletions(-)
This patch does IMO too much: it moves the qemuDomainJobInfo struct to qemu_domain.h, moves a bunch of functions that depend on the qemuDomainJobInfo structure to qemu_domain.c, moves the attributes "current" and "completed" to a different structure, and introduces new callbacks. This caused the moved code to be changed in the same step in order to reflect the attribute movement. To illustrate this: ...
+void +qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, + virDomainObjPtr vm) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
^This line was changed during the code movement
+ virObjectEventPtr event; + virTypedParameterPtr params = NULL; + int nparams = 0; + int type; + + if (!jobPriv->completed) + return;
^These 2 as well...
+ + if (qemuDomainJobInfoToParams(jobPriv->completed, &type,
^This one too... When doing code movements, it's a better idea to first move the code and then perform the changes; that is easier for the reviewer as well as for anyone looking at the commit history later. You should be able to move the affected functions that need the qemuDomainJobInfo structure along with the structure in one patch, and then in another patch move the attributes "current" and "completed" to a different place and adjust the code accordingly. If for some reason moving the qemuDomainJobInfo structure in the first patch caused issues for the follow-up patch moving the attributes, then, since qemu_domain.h includes qemu_domainjob.h, you could leave the qemuDomainJobInfo structure movement out of the first patch and move it in the second one.
+ &params, &nparams) < 0) { + VIR_WARN("Could not get stats for completed job; domain %s", + vm->def->name); + } + + event = virDomainEventJobCompletedNewFromObj(vm, params, nparams); + virObjectEventStateQueue(driver->domainEventState, event); +}
...
static int qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt, @@ -140,6 +636,8 @@ static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .resetJobPrivate = qemuJobResetPrivate, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, + .setJobInfoOperation = qemuDomainJobInfoSetOperation,
^This would probably be better called jobInfoSetOperation
+ .currentJobInfoInit = qemuDomainCurrentJobInfoInit,
As you've established in the commit message itself, "current" and "completed" are QEMU-specific, so the callback should be called jobInfoInit or maybe jobInfoNew. Erik
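(As an aside for readers following the naming discussion: below is a minimal sketch of what the callback table could look like with the names proposed above. The declarations are simplified stand-ins rather than the actual libvirt code, and the member names `jobInfoSetOperation` and `jobInfoInit` are only the reviewer's suggestions, not something that exists in the tree.)

```c
/* Sketch only: simplified stand-ins for the real libvirt types. */
typedef struct _qemuDomainJobObj qemuDomainJobObj;

typedef void (*qemuDomainObjJobInfoSetOperation)(qemuDomainJobObj *job,
                                                 int operation); /* virDomainJobOperation in libvirt */
typedef void (*qemuDomainObjJobInfoInit)(qemuDomainJobObj *job,
                                         unsigned long long now);

typedef struct _qemuDomainObjPrivateJobCallbacks {
    /* ... allocJobPrivate, freeJobPrivate, resetJobPrivate, formatJob, parseJob ... */
    qemuDomainObjJobInfoSetOperation jobInfoSetOperation; /* suggested rename of setJobInfoOperation */
    qemuDomainObjJobInfoInit jobInfoInit;                  /* suggested rename of currentJobInfoInit */
} qemuDomainObjPrivateJobCallbacks;
```

The point of the rename is that the generic callback table should not leak QEMU-specific attribute names such as `current` into its member names.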

Sorry for not noticing this earlier, but the movement needs to happen together with the changes in the code, as the moved code is no longer available to be directly accessed by `qemu_domainjob`. (If we include `qemu_domain.h` in `qemu_domainjob.h`, a cyclic dependency would be created.) Thanks, Prathamesh Chavan
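(As an aside, the cyclic-dependency point above is easier to see with a minimal, self-contained sketch of the indirection the series relies on. None of this is libvirt code; all names are simplified stand-ins. The generic job layer sees only an opaque `privateData` pointer plus a callback table, and the hypervisor layer fills both in, so the job code never has to include the qemu-specific header.)

```c
#include <stdlib.h>

/* --- generic job layer (think qemu_domainjob.h): never includes the qemu header --- */
typedef struct _JobObj JobObj;

typedef struct _JobCallbacks {
    void (*currentJobInfoInit)(JobObj *job, unsigned long long now);
} JobCallbacks;

struct _JobObj {
    void *privateData;        /* hypervisor-specific job data hides behind this pointer */
    const JobCallbacks *cb;   /* hypervisor-specific hooks registered at init time */
};

/* Generic code can start an async job without knowing what "job info" looks like. */
static void
jobBeginAsync(JobObj *job, unsigned long long now)
{
    job->cb->currentJobInfoInit(job, now);
}

/* --- qemu layer (think qemu_domain.c): includes the generic header, not vice versa --- */
typedef struct {
    unsigned long long started;
} QemuJobInfo;                /* stand-in for qemuDomainJobInfo */

typedef struct {
    QemuJobInfo *current;
} QemuJobPrivate;             /* stand-in for the job's privateData */

static void
qemuCurrentJobInfoInit(JobObj *job, unsigned long long now)
{
    QemuJobPrivate *priv = job->privateData;

    if ((priv->current = calloc(1, sizeof(*priv->current))))
        priv->current->started = now;
}

static const JobCallbacks qemuJobCallbacks = {
    .currentJobInfoInit = qemuCurrentJobInfoInit,
};

int
main(void)
{
    QemuJobPrivate priv = { NULL };
    JobObj job = { &priv, &qemuJobCallbacks };

    jobBeginAsync(&job, 42);
    free(priv.current);
    return 0;
}
```

This mirrors the shape the patch gives `qemuDomainObjPrivateJobCallbacks`: the callbacks are registered from qemu_domain.c, which is why the helpers that touch `qemuDomainJobInfo` have to move there in the same series.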

Since the attribute `jobs_queued` was specific to jobs, we decided to move this from `qemuDomainObjPrivate` to `qemuDomainJobObj` structure. Also, reference to `maxQueuedJobs` required us to access config of the qemu-driver. And creating its copy in the `qemuDomainJob` helped us access the variable without referencing the driver's config. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_domain.c | 5 +- src/qemu/qemu_domain.h | 2 - src/qemu/qemu_domainjob.c | 142 +++++++++++++++++++++----------------- src/qemu/qemu_domainjob.h | 6 +- src/qemu/qemu_process.c | 12 ++-- 5 files changed, 94 insertions(+), 73 deletions(-) diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 1ae44ae39f..677fa7ea91 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -2085,11 +2085,14 @@ static void * qemuDomainObjPrivateAlloc(void *opaque) { qemuDomainObjPrivatePtr priv; + virQEMUDriverPtr driver = opaque; + g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); if (VIR_ALLOC(priv) < 0) return NULL; - if (qemuDomainObjInitJob(&priv->job, &qemuPrivateJobCallbacks) < 0) { + if (qemuDomainObjInitJob(&priv->job, &qemuPrivateJobCallbacks, + cfg->maxQueuedJobs) < 0) { virReportSystemError(errno, "%s", _("Unable to init qemu driver mutexes")); goto error; diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index 386ae17272..507f710200 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -161,8 +161,6 @@ struct _qemuDomainObjPrivate { bool pausedShutdown; virTristateBool allowReboot; - int jobs_queued; - unsigned long migMaxBandwidth; char *origname; int nbdPort; /* Port used for migration with NBD */ diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 503a87bb12..1057f8d309 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -117,10 +117,12 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, int qemuDomainObjInitJob(qemuDomainJobObjPtr job, - qemuDomainObjPrivateJobCallbacksPtr cb) + qemuDomainObjPrivateJobCallbacksPtr cb, + unsigned int maxQueuedJobs) { memset(job, 0, sizeof(*job)); job->cb = cb; + job->maxQueuedJobs = maxQueuedJobs; if (!(job->privateData = job->cb->allocJobPrivate())) return -1; @@ -334,17 +336,16 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, static int ATTRIBUTE_NONNULL(1) qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job, qemuDomainAgentJob agentJob, qemuDomainAsyncJob asyncJob, bool nowait) { - qemuDomainObjPrivatePtr priv = obj->privateData; unsigned long long now; unsigned long long then; bool nested = job == QEMU_JOB_ASYNC_NESTED; bool async = job == QEMU_JOB_ASYNC; - g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); const char *blocker = NULL; const char *agentBlocker = NULL; int ret = -1; @@ -358,85 +359,85 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, qemuDomainAgentJobTypeToString(agentJob), qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name, - qemuDomainJobTypeToString(priv->job.active), - qemuDomainAgentJobTypeToString(priv->job.agentActive), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + qemuDomainJobTypeToString(jobObj->active), + qemuDomainAgentJobTypeToString(jobObj->agentActive), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); if (virTimeMillisNow(&now) < 0) return -1; - priv->jobs_queued++; + jobObj->jobs_queued++; then = now + QEMU_JOB_WAIT_TIME; retry: if ((!async && job != QEMU_JOB_DESTROY) && - 
cfg->maxQueuedJobs && - priv->jobs_queued > cfg->maxQueuedJobs) { + jobObj->maxQueuedJobs && + jobObj->jobs_queued > jobObj->maxQueuedJobs) { goto error; } - while (!nested && !qemuDomainNestedJobAllowed(&priv->job, job)) { + while (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) { if (nowait) goto cleanup; VIR_DEBUG("Waiting for async job (vm=%p name=%s)", obj, obj->def->name); - if (virCondWaitUntil(&priv->job.asyncCond, &obj->parent.lock, then) < 0) + if (virCondWaitUntil(&jobObj->asyncCond, &obj->parent.lock, then) < 0) goto error; } - while (!qemuDomainObjCanSetJob(&priv->job, job, agentJob)) { + while (!qemuDomainObjCanSetJob(jobObj, job, agentJob)) { if (nowait) goto cleanup; VIR_DEBUG("Waiting for job (vm=%p name=%s)", obj, obj->def->name); - if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0) + if (virCondWaitUntil(&jobObj->cond, &obj->parent.lock, then) < 0) goto error; } /* No job is active but a new async job could have been started while obj * was unlocked, so we need to recheck it. */ - if (!nested && !qemuDomainNestedJobAllowed(&priv->job, job)) + if (!nested && !qemuDomainNestedJobAllowed(jobObj, job)) goto retry; ignore_value(virTimeMillisNow(&now)); if (job) { - qemuDomainObjResetJob(&priv->job); + qemuDomainObjResetJob(jobObj); if (job != QEMU_JOB_ASYNC) { VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - priv->job.active = job; - priv->job.owner = virThreadSelfID(); - priv->job.ownerAPI = virThreadJobGet(); - priv->job.started = now; + jobObj->active = job; + jobObj->owner = virThreadSelfID(); + jobObj->ownerAPI = virThreadJobGet(); + jobObj->started = now; } else { VIR_DEBUG("Started async job: %s (vm=%p name=%s)", qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); - qemuDomainObjResetAsyncJob(&priv->job); - priv->job.cb->currentJobInfoInit(&priv->job, now); - priv->job.asyncJob = asyncJob; - priv->job.asyncOwner = virThreadSelfID(); - priv->job.asyncOwnerAPI = virThreadJobGet(); - priv->job.asyncStarted = now; + qemuDomainObjResetAsyncJob(jobObj); + jobObj->cb->currentJobInfoInit(jobObj, now); + jobObj->asyncJob = asyncJob; + jobObj->asyncOwner = virThreadSelfID(); + jobObj->asyncOwnerAPI = virThreadJobGet(); + jobObj->asyncStarted = now; } } if (agentJob) { - qemuDomainObjResetAgentJob(&priv->job); + qemuDomainObjResetAgentJob(jobObj); VIR_DEBUG("Started agent job: %s (vm=%p name=%s job=%s async=%s)", qemuDomainAgentJobTypeToString(agentJob), obj, obj->def->name, - qemuDomainJobTypeToString(priv->job.active), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); - priv->job.agentActive = agentJob; - priv->job.agentOwner = virThreadSelfID(); - priv->job.agentOwnerAPI = virThreadJobGet(); - priv->job.agentStarted = now; + qemuDomainJobTypeToString(jobObj->active), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); + jobObj->agentActive = agentJob; + jobObj->agentOwner = virThreadSelfID(); + jobObj->agentOwnerAPI = virThreadJobGet(); + jobObj->agentStarted = now; } if (qemuDomainTrackJob(job)) @@ -446,12 +447,12 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, error: ignore_value(virTimeMillisNow(&now)); - if (priv->job.active && priv->job.started) - duration = now - priv->job.started; - if (priv->job.agentActive && priv->job.agentStarted) - agentDuration = now - priv->job.agentStarted; - if (priv->job.asyncJob && priv->job.asyncStarted) - asyncDuration = now - 
priv->job.asyncStarted; + if (jobObj->active && jobObj->started) + duration = now - jobObj->started; + if (jobObj->agentActive && jobObj->agentStarted) + agentDuration = now - jobObj->agentStarted; + if (jobObj->asyncJob && jobObj->asyncStarted) + asyncDuration = now - jobObj->asyncStarted; VIR_WARN("Cannot start job (%s, %s, %s) for domain %s; " "current job is (%s, %s, %s) " @@ -461,24 +462,24 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, qemuDomainAgentJobTypeToString(agentJob), qemuDomainAsyncJobTypeToString(asyncJob), obj->def->name, - qemuDomainJobTypeToString(priv->job.active), - qemuDomainAgentJobTypeToString(priv->job.agentActive), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - priv->job.owner, NULLSTR(priv->job.ownerAPI), - priv->job.agentOwner, NULLSTR(priv->job.agentOwnerAPI), - priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI), - priv->job.apiFlags, + qemuDomainJobTypeToString(jobObj->active), + qemuDomainAgentJobTypeToString(jobObj->agentActive), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), + jobObj->owner, NULLSTR(jobObj->ownerAPI), + jobObj->agentOwner, NULLSTR(jobObj->agentOwnerAPI), + jobObj->asyncOwner, NULLSTR(jobObj->asyncOwnerAPI), + jobObj->apiFlags, duration / 1000, agentDuration / 1000, asyncDuration / 1000); if (job) { - if (nested || qemuDomainNestedJobAllowed(&priv->job, job)) - blocker = priv->job.ownerAPI; + if (nested || qemuDomainNestedJobAllowed(jobObj, job)) + blocker = jobObj->ownerAPI; else - blocker = priv->job.asyncOwnerAPI; + blocker = jobObj->asyncOwnerAPI; } if (agentJob) - agentBlocker = priv->job.agentOwnerAPI; + agentBlocker = jobObj->agentOwnerAPI; if (errno == ETIMEDOUT) { if (blocker && agentBlocker) { @@ -501,8 +502,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, _("cannot acquire state change lock")); } ret = -2; - } else if (cfg->maxQueuedJobs && - priv->jobs_queued > cfg->maxQueuedJobs) { + } else if (jobObj->maxQueuedJobs && + jobObj->jobs_queued > jobObj->maxQueuedJobs) { if (blocker && agentBlocker) { virReportError(VIR_ERR_OPERATION_FAILED, _("cannot acquire state change " @@ -532,7 +533,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, } cleanup: - priv->jobs_queued--; + jobObj->jobs_queued--; return ret; } @@ -548,7 +549,10 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver, virDomainObjPtr obj, qemuDomainJob job) { - if (qemuDomainObjBeginJobInternal(driver, obj, job, + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + + if (qemuDomainObjBeginJobInternal(driver, obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, false) < 0) return -1; @@ -568,7 +572,10 @@ qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver, virDomainObjPtr obj, qemuDomainAgentJob agentJob) { - return qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_NONE, + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + + return qemuDomainObjBeginJobInternal(driver, obj, jobObj, QEMU_JOB_NONE, agentJob, QEMU_ASYNC_JOB_NONE, false); } @@ -579,15 +586,15 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, virDomainJobOperation operation, unsigned long apiFlags) { - qemuDomainObjPrivatePtr priv; + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; - if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC, + if (qemuDomainObjBeginJobInternal(driver, obj, jobObj, QEMU_JOB_ASYNC, QEMU_AGENT_JOB_NONE, asyncJob, false) < 0) return -1; - priv = obj->privateData; - 
priv->job.cb->setJobInfoOperation(&priv->job, operation); + priv->job.cb->setJobInfoOperation(jobObj, operation); priv->job.apiFlags = apiFlags; return 0; } @@ -598,6 +605,7 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; if (asyncJob != priv->job.asyncJob) { virReportError(VIR_ERR_INTERNAL_ERROR, @@ -611,7 +619,7 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, priv->job.asyncOwner); } - return qemuDomainObjBeginJobInternal(driver, obj, + return qemuDomainObjBeginJobInternal(driver, obj, jobObj, QEMU_JOB_ASYNC_NESTED, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, @@ -636,7 +644,10 @@ qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, virDomainObjPtr obj, qemuDomainJob job) { - return qemuDomainObjBeginJobInternal(driver, obj, job, + qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + + return qemuDomainObjBeginJobInternal(driver, obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, true); } @@ -651,9 +662,10 @@ void qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; qemuDomainJob job = priv->job.active; - priv->jobs_queued--; + jobObj->jobs_queued--; VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), @@ -672,9 +684,10 @@ void qemuDomainObjEndAgentJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; qemuDomainAgentJob agentJob = priv->job.agentActive; - priv->jobs_queued--; + jobObj->jobs_queued--; VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)", qemuDomainAgentJobTypeToString(agentJob), @@ -691,8 +704,9 @@ void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; + qemuDomainJobObjPtr jobObj = &priv->job; - priv->jobs_queued--; + jobObj->jobs_queued--; VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)", qemuDomainAsyncJobTypeToString(priv->job.asyncJob), diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 88051d099a..11e7f2f432 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -152,6 +152,9 @@ struct _qemuDomainJobObj { char *error; /* job event completion error */ unsigned long apiFlags; /* flags passed to the API which started the async job */ + int jobs_queued; + unsigned int maxQueuedJobs; + void *privateData; /* job specific collection of data */ qemuDomainObjPrivateJobCallbacksPtr cb; }; @@ -213,7 +216,8 @@ void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); int qemuDomainObjInitJob(qemuDomainJobObjPtr job, - qemuDomainObjPrivateJobCallbacksPtr cb); + qemuDomainObjPrivateJobCallbacksPtr cb, + unsigned int maxQueuedJobs); bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 652d217b5c..1c7c0ba19a 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -3587,7 +3587,9 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, unsigned int *stopFlags) { qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; + qemuDomainJobObjPtr jobObj = &priv->job; + qemuDomainJobPrivatePtr jobPriv = jobObj->privateData; + virDomainState state; int reason; unsigned long long now; @@ -3644,10 +3646,10 @@ qemuProcessRecoverJob(virQEMUDriverPtr 
driver, ignore_value(virTimeMillisNow(&now)); /* Restore the config of the async job which is not persisted */ - priv->jobs_queued++; - priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP; - priv->job.asyncOwnerAPI = virThreadJobGet(); - priv->job.asyncStarted = now; + jobObj->jobs_queued++; + jobObj->asyncJob = QEMU_ASYNC_JOB_BACKUP; + jobObj->asyncOwnerAPI = virThreadJobGet(); + jobObj->asyncStarted = now; qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | JOB_MASK(QEMU_JOB_SUSPEND) | -- 2.25.1
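(Again as an aside: the hunk above is large, so here is a condensed sketch of the queue bookkeeping after the change, using simplified names rather than the real libvirt declarations. The limit is copied into the job object when it is initialized and checked there, so the begin-job path no longer has to fetch the driver config.)

```c
#include <stdio.h>

/* Stand-in for qemuDomainJobObj after the change: both the counter and the
 * limit now live in the job object itself. */
typedef struct {
    int jobs_queued;            /* moved here from qemuDomainObjPrivate */
    unsigned int maxQueuedJobs; /* copied from the driver config (cfg->maxQueuedJobs) at init */
} JobObj;

static void
jobInit(JobObj *job, unsigned int maxQueuedJobs)
{
    job->jobs_queued = 0;
    job->maxQueuedJobs = maxQueuedJobs;
}

/* Mirrors the check in qemuDomainObjBeginJobInternal(): a limit of 0 means unlimited. */
static int
jobQueueFull(const JobObj *job)
{
    return job->maxQueuedJobs &&
           job->jobs_queued > (int) job->maxQueuedJobs;
}

int
main(void)
{
    JobObj job;

    jobInit(&job, 2);
    job.jobs_queued++;          /* BeginJob increments the counter before checking the limit */
    printf("queue full: %d\n", jobQueueFull(&job));
    return 0;
}
```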

On Tue, Aug 04, 2020 at 08:06:44PM +0530, Prathamesh Chavan wrote:
Since the attribute `jobs_queued` was specific to jobs, we decided to move this from `qemuDomainObjPrivate` to `qemuDomainJobObj` structure.
Also, reference to `maxQueuedJobs` required us to access config of the qemu-driver. And creating its copy in the `qemuDomainJob` helped us access the variable without referencing the driver's config.
Just like you split the paragraphs in the commit message, this patch should have been split in 2 as well even though both changes lead to a common goal. Erik

The function `qemuDomainObjSaveStatus` required an access to `virQEMUDriverPtr`. To make jobs hypervisor-agnostic we remove this funciton and replace it with a callback function from `qemuDomainJob` Removal of `virQEMUDriverPtr` as parameter resulted in removal of the same from function, where it was pass. All of such references were removed as the variable was no longer required. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_backup.c | 41 +- src/qemu/qemu_backup.h | 3 +- src/qemu/qemu_block.c | 45 +- src/qemu/qemu_block.h | 6 +- src/qemu/qemu_blockjob.c | 45 +- src/qemu/qemu_blockjob.h | 3 +- src/qemu/qemu_checkpoint.c | 29 +- src/qemu/qemu_domain.c | 78 ++- src/qemu/qemu_domain.h | 24 +- src/qemu/qemu_domainjob.c | 50 +- src/qemu/qemu_domainjob.h | 29 +- src/qemu/qemu_driver.c | 848 ++++++++++++++----------------- src/qemu/qemu_hotplug.c | 319 ++++++------ src/qemu/qemu_hotplug.h | 30 +- src/qemu/qemu_migration.c | 315 +++++------- src/qemu/qemu_migration.h | 12 +- src/qemu/qemu_migration_cookie.c | 7 +- src/qemu/qemu_migration_params.c | 48 +- src/qemu/qemu_migration_params.h | 15 +- src/qemu/qemu_process.c | 258 +++++----- src/qemu/qemu_process.h | 15 +- tests/qemuhotplugtest.c | 2 +- 22 files changed, 986 insertions(+), 1236 deletions(-) diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index 1822c6f267..7e5926250a 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -127,9 +127,9 @@ qemuBackupDiskDataCleanupOne(virDomainObjPtr vm, if (!dd->started) { if (dd->added) { - qemuDomainObjEnterMonitor(priv->driver, vm); + qemuDomainObjEnterMonitor(vm); qemuBlockStorageSourceAttachRollback(priv->mon, dd->crdata->srcdata[0]); - ignore_value(qemuDomainObjExitMonitor(priv->driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } if (dd->created) { @@ -439,12 +439,12 @@ qemuBackupDiskPrepareOneStorage(virDomainObjPtr vm, QEMU_ASYNC_JOB_BACKUP) < 0) return -1; } else { - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) return -1; rc = qemuBlockStorageSourceAttachApply(priv->mon, dd->crdata->srcdata[0]); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; } @@ -572,7 +572,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, virDomainBackupDefFree(priv->backup); priv->backup = NULL; - qemuDomainObjEndAsyncJob(priv->driver, vm); + qemuDomainObjEndAsyncJob(vm); } @@ -625,12 +625,12 @@ qemuBackupJobCancelBlockjobs(virDomainObjPtr vm, if (backupdisk->state != VIR_DOMAIN_BACKUP_DISK_STATE_RUNNING) continue; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; rc = qemuMonitorJobCancel(priv->mon, job->name, false); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; if (rc == 0) { @@ -740,7 +740,7 @@ qemuBackupBegin(virDomainObjPtr vm, * infrastructure for async jobs. 
We'll allow standard modify-type jobs * as the interlocking of conflicting operations is handled on the block * job level */ - if (qemuDomainObjBeginAsyncJob(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_BACKUP, VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0) return -1; @@ -804,7 +804,7 @@ qemuBackupBegin(virDomainObjPtr vm, priv->backup = g_steal_pointer(&def); - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) goto endjob; /* TODO: TLS is a must-have for the modern age */ @@ -824,7 +824,7 @@ qemuBackupBegin(virDomainObjPtr vm, if (rc == 0) rc = qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; job_started = true; @@ -837,12 +837,12 @@ qemuBackupBegin(virDomainObjPtr vm, } if (pull) { - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) < 0) goto endjob; /* note that if the export fails we've already created the checkpoint * and we will not delete it */ rc = qemuBackupBeginPullExportDisks(vm, dd, ndd); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) { @@ -863,14 +863,14 @@ qemuBackupBegin(virDomainObjPtr vm, qemuCheckpointRollbackMetadata(vm, chk); if (!job_started && (nbd_running || tlsAlias || tlsSecretAlias) && - qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) == 0) { + qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_BACKUP) == 0) { if (nbd_running) ignore_value(qemuMonitorNBDServerStop(priv->mon)); if (tlsAlias) ignore_value(qemuMonitorDelObject(priv->mon, tlsAlias, false)); if (tlsSecretAlias) ignore_value(qemuMonitorDelObject(priv->mon, tlsSecretAlias, false)); - ignore_value(qemuDomainObjExitMonitor(priv->driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } if (ret < 0 && !job_started && priv->backup) @@ -879,7 +879,7 @@ qemuBackupBegin(virDomainObjPtr vm, if (ret == 0) qemuDomainObjReleaseAsyncJob(vm); else - qemuDomainObjEndAsyncJob(priv->driver, vm); + qemuDomainObjEndAsyncJob(vm); return ret; } @@ -929,14 +929,14 @@ qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm, return; if (backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL) { - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; ignore_value(qemuMonitorNBDServerStop(priv->mon)); if (backup->tlsAlias) ignore_value(qemuMonitorDelObject(priv->mon, backup->tlsAlias, false)); if (backup->tlsSecretAlias) ignore_value(qemuMonitorDelObject(priv->mon, backup->tlsSecretAlias, false)); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; /* update the final statistics with the current job's data */ @@ -1067,8 +1067,7 @@ qemuBackupGetJobInfoStatsUpdateOne(virDomainObjPtr vm, int -qemuBackupGetJobInfoStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBackupGetJobInfoStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo) { qemuDomainBackupStats *stats = &jobInfo->stats.backup; @@ -1090,11 +1089,11 @@ qemuBackupGetJobInfoStats(virQEMUDriverPtr driver, jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorGetJobInfo(priv->mon, &blockjobs, &nblockjobs); - if 
(qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; /* count in completed jobs */ diff --git a/src/qemu/qemu_backup.h b/src/qemu/qemu_backup.h index 075fde709b..9925fddbf9 100644 --- a/src/qemu/qemu_backup.h +++ b/src/qemu/qemu_backup.h @@ -48,8 +48,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, qemuDomainJobStatus jobstatus); int -qemuBackupGetJobInfoStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBackupGetJobInfoStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo); /* exported for testing */ diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c index 26c1b42428..23b60e73ec 100644 --- a/src/qemu/qemu_block.c +++ b/src/qemu/qemu_block.c @@ -320,8 +320,7 @@ qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk, int -qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockNodeNamesDetect(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -334,13 +333,13 @@ qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_NAMED_BLOCK_NODES)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; data = qemuMonitorQueryNamedBlockNodes(qemuDomainGetMonitor(vm)); blockstats = qemuMonitorQueryBlockstats(qemuDomainGetMonitor(vm)); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !data || !blockstats) + if (qemuDomainObjExitMonitor(vm) < 0 || !data || !blockstats) return -1; if (!(disktable = qemuBlockNodeNameGetBackingChain(data, blockstats))) @@ -1976,7 +1975,6 @@ qemuBlockStorageSourceChainDetach(qemuMonitorPtr mon, /** * qemuBlockStorageSourceDetachOneBlockdev: - * @driver: qemu driver object * @vm: domain object * @asyncJob: currently running async job * @src: storage source to detach @@ -1986,14 +1984,13 @@ qemuBlockStorageSourceChainDetach(qemuMonitorPtr mon, * monitor internally. 
*/ int -qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virStorageSourcePtr src) { int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorBlockdevDel(qemuDomainGetMonitor(vm), src->nodeformat); @@ -2001,7 +1998,7 @@ qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriverPtr driver, if (ret == 0) ret = qemuMonitorBlockdevDel(qemuDomainGetMonitor(vm), src->nodestorage); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; @@ -2561,13 +2558,13 @@ qemuBlockStorageSourceCreateGeneric(virDomainObjPtr vm, qemuBlockJobSyncBegin(job); - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorBlockdevCreate(priv->mon, job->name, props); props = NULL; - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; qemuBlockJobStarted(job, vm); @@ -2708,18 +2705,18 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, false, true) < 0) return -1; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuBlockStorageSourceAttachApplyStorageDeps(priv->mon, data); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; if (qemuBlockStorageSourceCreateStorage(vm, src, chain, asyncJob) < 0) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuBlockStorageSourceAttachApplyStorage(priv->mon, data); @@ -2727,7 +2724,7 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, if (rc == 0) rc = qemuBlockStorageSourceAttachApplyFormatDeps(priv->mon, data); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; if (qemuBlockStorageSourceCreateFormat(vm, src, backingStore, chain, @@ -2740,12 +2737,12 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, false, true) < 0) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuBlockStorageSourceAttachApplyFormat(priv->mon, data); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; ret = 0; @@ -2753,10 +2750,10 @@ qemuBlockStorageSourceCreate(virDomainObjPtr vm, cleanup: if (ret < 0 && virDomainObjIsActive(vm) && - qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) == 0) { + qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuBlockStorageSourceAttachRollback(priv->mon, data); - ignore_value(qemuDomainObjExitMonitor(priv->driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } return ret; @@ -2861,17 +2858,16 @@ qemuBlockGetNamedNodeData(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; g_autoptr(virHashTable) blockNamedNodeData = NULL; bool supports_flat = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QMP_QUERY_NAMED_BLOCK_NODES_FLAT); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if 
(qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return NULL; blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon, supports_flat); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockNamedNodeData) + if (qemuDomainObjExitMonitor(vm) < 0 || !blockNamedNodeData) return NULL; return g_steal_pointer(&blockNamedNodeData); @@ -3185,7 +3181,6 @@ qemuBlockReopenFormat(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; g_autoptr(virJSONValue) reopenprops = NULL; int rc; @@ -3200,12 +3195,12 @@ qemuBlockReopenFormat(virDomainObjPtr vm, if (!(reopenprops = qemuBlockStorageSourceGetBlockdevProps(src, src->backingStore))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorBlockdevReopen(priv->mon, &reopenprops); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; return 0; diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h index 9aab620947..35148ea2ba 100644 --- a/src/qemu/qemu_block.h +++ b/src/qemu/qemu_block.h @@ -46,8 +46,7 @@ qemuBlockNodeNameGetBackingChain(virJSONValuePtr namednodesdata, virJSONValuePtr blockstats); int -qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockNodeNamesDetect(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); virHashTablePtr @@ -140,8 +139,7 @@ qemuBlockStorageSourceAttachRollback(qemuMonitorPtr mon, qemuBlockStorageSourceAttachDataPtr data); int -qemuBlockStorageSourceDetachOneBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuBlockStorageSourceDetachOneBlockdev(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virStorageSourcePtr src); diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c index c49c98e547..265f449b7a 100644 --- a/src/qemu/qemu_blockjob.c +++ b/src/qemu/qemu_blockjob.c @@ -491,8 +491,7 @@ qemuBlockJobRefreshJobsFindInactive(const void *payload, int -qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuBlockJobRefreshJobs(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorJobInfoPtr *jobinfo = NULL; @@ -503,11 +502,11 @@ qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, int ret = -1; int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorGetJobInfo(priv->mon, &jobinfo, &njobinfo); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; for (i = 0; i < njobinfo; i++) { @@ -524,13 +523,13 @@ qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, qemuBlockJobMarkBroken(job); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorJobCancel(priv->mon, job->name, true); if (rc == -1 && jobinfo[i]->status == QEMU_MONITOR_JOB_STATUS_CONCLUDED) VIR_WARN("can't cancel job '%s' with invalid data", job->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) @@ -757,7 +756,7 @@ qemuBlockJobEventProcessLegacyCompleted(virQEMUDriverPtr driver, disk->src->id = 0; virStorageSourceBackingStoreClear(disk->src); ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, NULL, true)); - ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob)); + ignore_value(qemuBlockNodeNamesDetect(vm, asyncJob)); qemuBlockJobUnregister(job, vm); qemuDomainSaveConfig(vm); } @@ -843,11 +842,11 @@ 
qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriverPtr driver, if (!(data = qemuBlockStorageSourceChainDetachPrepareBlockdev(chain))) return; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; qemuBlockStorageSourceChainDetach(qemuDomainGetMonitor(vm), data); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; qemuDomainStorageSourceChainAccessRevoke(driver, vm, chain); @@ -959,12 +958,12 @@ qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObjPtr vm, if (!actions) return 0; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return 0; @@ -1123,12 +1122,12 @@ qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObjPtr vm, return -1; } - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (!active) { @@ -1346,12 +1345,12 @@ qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObjPtr vm, if (!actions) return 0; - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return 0; @@ -1431,12 +1430,12 @@ qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriverPtr driver, ignore_value(qemuMonitorTransactionBitmapRemove(actions, disk->mirror->nodeformat, "libvirt-tmp-activewrite")); - if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; /* Ideally, we would make the backing chain read only again (yes, SELinux @@ -1480,12 +1479,12 @@ qemuBlockJobProcessEventConcludedCreate(virQEMUDriverPtr driver, VIR_FREE(backend->encryptsecretAlias); } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.create.src); @@ -1520,7 +1519,7 @@ qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver, return; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return; if (backend) @@ -1529,7 +1528,7 @@ qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver, if (actions) qemuMonitorTransaction(qemuDomainGetMonitor(vm), &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return; if (job->data.backup.store) @@ -1610,7 +1609,7 @@ qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job, unsigned long long progressCurrent = 0; unsigned long long progressTotal = 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if 
(qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; /* we need to fetch the error state as the event does not propagate it */ @@ -1643,7 +1642,7 @@ qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job, /* dismiss job in qemu */ ignore_value(qemuMonitorJobDismiss(qemuDomainGetMonitor(vm), job->name)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if ((job->newstate == QEMU_BLOCKJOB_STATE_COMPLETED || diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h index 9f73a3547c..bdf4787eb0 100644 --- a/src/qemu/qemu_blockjob.h +++ b/src/qemu/qemu_blockjob.h @@ -226,8 +226,7 @@ qemuBlockJobStartupFinalize(virDomainObjPtr vm, qemuBlockJobDataPtr job); int -qemuBlockJobRefreshJobs(virQEMUDriverPtr driver, - virDomainObjPtr vm); +qemuBlockJobRefreshJobs(virDomainObjPtr vm); void qemuBlockJobUpdate(virDomainObjPtr vm, diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c index f45ab29d4c..b90410aa20 100644 --- a/src/qemu/qemu_checkpoint.c +++ b/src/qemu/qemu_checkpoint.c @@ -198,9 +198,9 @@ qemuCheckpointDiscardBitmaps(virDomainObjPtr vm, relabelimages = g_slist_prepend(relabelimages, src); } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; relabel: @@ -457,9 +457,9 @@ qemuCheckpointCreate(virQEMUDriverPtr driver, if (qemuCheckpointCreateCommon(driver, vm, def, &actions, &chk) < 0) return NULL; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorTransaction(qemuDomainGetMonitor(vm), &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) { + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) { qemuCheckpointRollbackMetadata(vm, chk); return NULL; } @@ -539,7 +539,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, /* Unlike snapshots, the RNG schema already ensured a sane filename. */ /* We are going to modify the domain below. 
*/ - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return NULL; if (redefine) { @@ -561,7 +561,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, checkpoint = virGetDomainCheckpoint(domain, chk->def->name); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return checkpoint; } @@ -578,7 +578,6 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, virDomainCheckpointDefPtr chkdef) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; g_autoptr(virHashTable) blockNamedNodeData = NULL; g_autofree struct qemuCheckpointDiskMap *diskmap = NULL; g_autoptr(virJSONValue) recoveractions = NULL; @@ -589,7 +588,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -659,7 +658,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (rc == 0 && recoveractions) rc = qemuMonitorTransaction(priv->mon, &recoveractions); @@ -667,7 +666,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, if (rc == 0) rc = qemuMonitorTransaction(priv->mon, &mergeactions); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; /* now do a final refresh */ @@ -675,11 +674,11 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, QEMU_ASYNC_JOB_NONE))) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorTransaction(priv->mon, &cleanupactions); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; /* update disks */ @@ -698,7 +697,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -782,7 +781,7 @@ qemuCheckpointDelete(virDomainObjPtr vm, VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY | VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (!metadata_only) { @@ -850,6 +849,6 @@ qemuCheckpointDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 677fa7ea91..d7a944a886 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -634,6 +634,7 @@ static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .allocJobPrivate = qemuJobAllocPrivate, .freeJobPrivate = qemuJobFreePrivate, .resetJobPrivate = qemuJobResetPrivate, + .saveStatus = qemuDomainSaveStatus, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, .setJobInfoOperation = qemuDomainJobInfoSetOperation, @@ -5905,20 +5906,19 @@ qemuDomainSaveConfig(virDomainObjPtr obj) * To be followed with qemuDomainObjExitMonitor() once complete */ static int -qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = obj->privateData; if (asyncJob != QEMU_ASYNC_JOB_NONE) { int ret; - if ((ret = 
qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0) + if ((ret = qemuDomainObjBeginNestedJob(obj, asyncJob)) < 0) return ret; if (!virDomainObjIsActive(obj)) { virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("domain is no longer running")); - qemuDomainObjEndJob(driver, obj); + qemuDomainObjEndJob(obj); return -1; } } else if (priv->job.asyncOwner == virThreadSelfID()) { @@ -5942,8 +5942,7 @@ qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver, } static void ATTRIBUTE_NONNULL(1) -qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver, - virDomainObjPtr obj) +qemuDomainObjExitMonitorInternal(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; bool hasRefs; @@ -5964,14 +5963,12 @@ qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver, priv->mon = NULL; if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjEndJob(driver, obj); + qemuDomainObjEndJob(obj); } -void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +void qemuDomainObjEnterMonitor(virDomainObjPtr obj) { - ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj, - QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuDomainObjEnterMonitorInternal(obj, QEMU_ASYNC_JOB_NONE)); } /* obj must NOT be locked before calling @@ -5984,10 +5981,9 @@ void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver, * and replaced by the persistent definition, so pointers stolen * from the live definition could no longer be valid. */ -int qemuDomainObjExitMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +int qemuDomainObjExitMonitor(virDomainObjPtr obj) { - qemuDomainObjExitMonitorInternal(driver, obj); + qemuDomainObjExitMonitorInternal(obj); if (!virDomainObjIsActive(obj)) { if (virGetLastErrorCode() == VIR_ERR_OK) virReportError(VIR_ERR_OPERATION_FAILED, "%s", @@ -6012,11 +6008,10 @@ int qemuDomainObjExitMonitor(virQEMUDriverPtr driver, * in the meantime). 
*/ int -qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) { - return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob); + return qemuDomainObjEnterMonitorInternal(obj, asyncJob); } @@ -6948,10 +6943,10 @@ qemuDomainSnapshotDiscard(virQEMUDriverPtr driver, return -1; } else { priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); /* we continue on even in the face of error */ qemuMonitorDeleteSnapshot(priv->mon, snap->def->name); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } @@ -7111,12 +7106,12 @@ qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, { bool haveJob; - haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactive(driver, vm); if (haveJob) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -7132,12 +7127,12 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, { bool haveJob; - haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactiveLocked(driver, vm); if (haveJob) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -8152,18 +8147,17 @@ qemuDomainHasBlockjob(virDomainObjPtr vm, int -qemuDomainUpdateDeviceList(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainUpdateDeviceList(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; char **aliases; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetDeviceAliases(priv->mon, &aliases); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc < 0) return -1; @@ -8175,8 +8169,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver, int -qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainUpdateMemoryDeviceInfo(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -8187,12 +8180,12 @@ qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver, if (vm->def->nmems == 0) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetMemoryDeviceInfo(priv->mon, &meminfo); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { virHashFree(meminfo); return -1; } @@ -9458,7 +9451,6 @@ qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm) /** * qemuDomainRefreshVcpuInfo: - * @driver: qemu driver data * @vm: domain object * @asyncJob: current asynchronous job type * @state: refresh vcpu state @@ -9471,8 +9463,7 @@ qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm) * Returns 0 on success and -1 on fatal error. 
*/ int -qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRefreshVcpuInfo(virDomainObjPtr vm, int asyncJob, bool state) { @@ -9493,13 +9484,13 @@ qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver, VIR_DEBUG("Maxvcpus %zu hotplug %d fast query %d", maxvcpus, hotplug, fast); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetCPUInfo(qemuDomainGetMonitor(vm), &info, maxvcpus, hotplug, fast); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) @@ -9611,7 +9602,6 @@ qemuDomainGetVcpuHalted(virDomainObjPtr vm, /** * qemuDomainRefreshVcpuHalted: - * @driver: qemu driver data * @vm: domain object * @asyncJob: current asynchronous job type * @@ -9620,8 +9610,7 @@ qemuDomainGetVcpuHalted(virDomainObjPtr vm, * Returns 0 on success and -1 on error */ int -qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRefreshVcpuHalted(virDomainObjPtr vm, int asyncJob) { virDomainVcpuDefPtr vcpu; @@ -9646,14 +9635,14 @@ qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver, QEMU_CAPS_QUERY_CPUS_FAST)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; fast = virQEMUCapsGet(QEMU_DOMAIN_PRIVATE(vm)->qemuCaps, QEMU_CAPS_QUERY_CPUS_FAST); haltedmap = qemuMonitorGetCpuHalted(qemuDomainGetMonitor(vm), maxvcpus, fast); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !haltedmap) + if (qemuDomainObjExitMonitor(vm) < 0 || !haltedmap) goto cleanup; for (i = 0; i < maxvcpus; i++) { @@ -10077,19 +10066,18 @@ qemuDomainVcpuPersistOrder(virDomainDefPtr def) int -qemuDomainCheckMonitor(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainCheckMonitor(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorCheck(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index 507f710200..a247629c17 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -576,15 +576,12 @@ void qemuDomainEventFlush(int timer, void *opaque); qemuMonitorPtr qemuDomainGetMonitor(virDomainObjPtr vm) ATTRIBUTE_NONNULL(1); -void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +void qemuDomainObjEnterMonitor(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); -int qemuDomainObjExitMonitor(virQEMUDriverPtr driver, - virDomainObjPtr obj) +int qemuDomainObjExitMonitor(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjEnterMonitorAsync(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; @@ -800,11 +797,9 @@ extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig; extern virDomainABIStability virQEMUDriverDomainABIStability; extern virSaveCookieCallbacks virQEMUDriverDomainSaveCookie; -int qemuDomainUpdateDeviceList(virQEMUDriverPtr driver, - virDomainObjPtr vm, int asyncJob); +int qemuDomainUpdateDeviceList(virDomainObjPtr vm, int asyncJob); -int 
qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainUpdateMemoryDeviceInfo(virDomainObjPtr vm, int asyncJob); bool qemuDomainDefCheckABIStability(virQEMUDriverPtr driver, @@ -868,13 +863,11 @@ bool qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm); bool qemuDomainHasVcpuPids(virDomainObjPtr vm); pid_t qemuDomainGetVcpuPid(virDomainObjPtr vm, unsigned int vcpuid); int qemuDomainValidateVcpuInfo(virDomainObjPtr vm); -int qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainRefreshVcpuInfo(virDomainObjPtr vm, int asyncJob, bool state); bool qemuDomainGetVcpuHalted(virDomainObjPtr vm, unsigned int vcpu); -int qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainRefreshVcpuHalted(virDomainObjPtr vm, int asyncJob); bool qemuDomainSupportsNicdev(virDomainDefPtr def, @@ -969,8 +962,7 @@ bool qemuDomainVcpuHotplugIsInOrder(virDomainDefPtr def) void qemuDomainVcpuPersistOrder(virDomainDefPtr def) ATTRIBUTE_NONNULL(1); -int qemuDomainCheckMonitor(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainCheckMonitor(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); bool qemuDomainSupportsVideoVga(virDomainVideoDefPtr video, diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 1057f8d309..19c847dffc 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -219,8 +219,7 @@ qemuDomainTrackJob(qemuDomainJob job) void -qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjSetJobPhase(virDomainObjPtr obj, int phase) { qemuDomainObjPrivatePtr priv = obj->privateData; @@ -241,7 +240,7 @@ qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, priv->job.phase = phase; priv->job.asyncOwner = me; - qemuDomainObjSaveStatus(driver, obj); + priv->job.cb->saveStatus(obj); } void @@ -257,14 +256,13 @@ qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, } void -qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) +qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; - if (priv->job.active == QEMU_JOB_ASYNC_NESTED) qemuDomainObjResetJob(&priv->job); qemuDomainObjResetAsyncJob(&priv->job); - qemuDomainObjSaveStatus(driver, obj); + priv->job.cb->saveStatus(obj); } void @@ -313,7 +311,6 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, /** * qemuDomainObjBeginJobInternal: - * @driver: qemu driver * @obj: domain object * @job: qemuDomainJob to start * @asyncJob: qemuDomainAsyncJob to start @@ -334,8 +331,7 @@ qemuDomainObjCanSetJob(qemuDomainJobObjPtr job, * -1 otherwise. 
*/ static int ATTRIBUTE_NONNULL(1) -qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginJobInternal(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj, qemuDomainJob job, qemuDomainAgentJob agentJob, @@ -441,7 +437,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, } if (qemuDomainTrackJob(job)) - qemuDomainObjSaveStatus(driver, obj); + jobObj->cb->saveStatus(obj); return 0; @@ -545,14 +541,13 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, * * Successful calls must be followed by EndJob eventually */ -int qemuDomainObjBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginJob(virDomainObjPtr obj, qemuDomainJob job) { qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJobObjPtr jobObj = &priv->job; - if (qemuDomainObjBeginJobInternal(driver, obj, jobObj, job, + if (qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, false) < 0) return -1; @@ -568,20 +563,18 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver, * To end job call qemuDomainObjEndAgentJob. */ int -qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginAgentJob(virDomainObjPtr obj, qemuDomainAgentJob agentJob) { qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJobObjPtr jobObj = &priv->job; - return qemuDomainObjBeginJobInternal(driver, obj, jobObj, QEMU_JOB_NONE, + return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_NONE, agentJob, QEMU_ASYNC_JOB_NONE, false); } -int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) @@ -589,7 +582,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJobObjPtr jobObj = &priv->job; - if (qemuDomainObjBeginJobInternal(driver, obj, jobObj, QEMU_JOB_ASYNC, + if (qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC, QEMU_AGENT_JOB_NONE, asyncJob, false) < 0) return -1; @@ -600,8 +593,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, } int -qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginNestedJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = obj->privateData; @@ -619,7 +611,7 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, priv->job.asyncOwner); } - return qemuDomainObjBeginJobInternal(driver, obj, jobObj, + return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC_NESTED, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, @@ -629,7 +621,6 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, /** * qemuDomainObjBeginJobNowait: * - * @driver: qemu driver * @obj: domain object * @job: qemuDomainJob to start * @@ -640,14 +631,13 @@ qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, * Returns: see qemuDomainObjBeginJobInternal */ int -qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, - virDomainObjPtr obj, +qemuDomainObjBeginJobNowait(virDomainObjPtr obj, qemuDomainJob job) { qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJobObjPtr jobObj = &priv->job; - return qemuDomainObjBeginJobInternal(driver, obj, jobObj, job, + return qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, true); } @@ -659,7 +649,7 @@ qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, * earlier qemuDomainBeginJob() call */ void 
-qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) +qemuDomainObjEndJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJobObjPtr jobObj = &priv->job; @@ -674,7 +664,7 @@ qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj) qemuDomainObjResetJob(&priv->job); if (qemuDomainTrackJob(job)) - qemuDomainObjSaveStatus(driver, obj); + jobObj->cb->saveStatus(obj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. */ virCondBroadcast(&priv->job.cond); @@ -701,7 +691,7 @@ qemuDomainObjEndAgentJob(virDomainObjPtr obj) } void -qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) +qemuDomainObjEndAsyncJob(virDomainObjPtr obj) { qemuDomainObjPrivatePtr priv = obj->privateData; qemuDomainJobObjPtr jobObj = &priv->job; @@ -713,7 +703,7 @@ qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj) obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - qemuDomainObjSaveStatus(driver, obj); + jobObj->cb->saveStatus(obj); virCondBroadcast(&priv->job.asyncCond); } diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 11e7f2f432..ee71ae77e8 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -104,6 +104,7 @@ typedef qemuDomainJobObj *qemuDomainJobObjPtr; typedef void *(*qemuDomainObjPrivateJobAlloc)(void); typedef void (*qemuDomainObjPrivateJobFree)(void *); typedef void (*qemuDomainObjPrivateJobReset)(void *); +typedef void (*qemuSaveStatus)(virDomainObjPtr); typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, qemuDomainJobObjPtr); typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr, @@ -119,6 +120,7 @@ struct _qemuDomainObjPrivateJobCallbacks { qemuDomainObjPrivateJobAlloc allocJobPrivate; qemuDomainObjPrivateJobFree freeJobPrivate; qemuDomainObjPrivateJobReset resetJobPrivate; + qemuSaveStatus saveStatus; qemuDomainObjPrivateJobFormat formatJob; qemuDomainObjPrivateJobParse parseJob; qemuDomainObjJobInfoSetOperation setJobInfoOperation; @@ -164,44 +166,35 @@ const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, const char *phase); -int qemuDomainObjBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginJob(virDomainObjPtr obj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginAgentJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, qemuDomainAgentJob agentJob) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, qemuDomainAsyncJob asyncJob) G_GNUC_WARN_UNUSED_RESULT; -int qemuDomainObjBeginJobNowait(virQEMUDriverPtr driver, - virDomainObjPtr obj, +int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; -void qemuDomainObjEndJob(virQEMUDriverPtr driver, - virDomainObjPtr obj); +void qemuDomainObjEndJob(virDomainObjPtr obj); void qemuDomainObjEndAgentJob(virDomainObjPtr obj); -void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj); +void qemuDomainObjEndAsyncJob(virDomainObjPtr obj); void 
qemuDomainObjAbortAsyncJob(virDomainObjPtr obj); -void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver, - virDomainObjPtr obj, +void qemuDomainObjSetJobPhase(virDomainObjPtr obj, int phase); void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, unsigned long long allowedJobs); int qemuDomainObjRestoreJob(virDomainObjPtr obj, qemuDomainJobObjPtr job); -void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, - virDomainObjPtr obj); +void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj); void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj); void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index ad3b657268..6623392495 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -207,8 +207,7 @@ qemuAutostartDomain(virDomainObjPtr vm, virResetLastError(); if (vm->autostart && !virDomainObjIsActive(vm)) { - if (qemuProcessBeginJob(driver, vm, - VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) { + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Failed to start job on VM '%s': %s"), vm->def->name, virGetLastErrorMessage()); @@ -222,7 +221,7 @@ qemuAutostartDomain(virDomainObjPtr vm, vm->def->name, virGetLastErrorMessage()); } - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); } ret = 0; @@ -1764,7 +1763,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, goto cleanup; def = NULL; - if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) { qemuDomainRemoveInactiveJob(driver, vm); goto cleanup; @@ -1776,7 +1775,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, start_flags) < 0) { virDomainAuditStart(vm, "booted", false); qemuDomainRemoveInactive(driver, vm); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); goto cleanup; } @@ -1797,7 +1796,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, dom = virGetDomain(conn, vm->def->name, vm->def->uuid, vm->def->id); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: virDomainDefFree(def); @@ -1828,7 +1827,7 @@ static int qemuDomainSuspend(virDomainPtr dom) cfg = virQEMUDriverGetConfig(driver); priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_SUSPEND) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_SUSPEND) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1855,7 +1854,7 @@ static int qemuDomainSuspend(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -1881,7 +1880,7 @@ static int qemuDomainResume(virDomainPtr dom) if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1913,7 +1912,7 @@ static int qemuDomainResume(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -1932,8 +1931,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, int agentFlag = isReboot ? 
QEMU_AGENT_SHUTDOWN_REBOOT : QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1966,7 +1964,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1976,13 +1974,13 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, } qemuDomainSetFakeReboot(driver, vm, isReboot); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemPowerdown(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -2065,8 +2063,7 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, if (!isReboot) agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (!qemuDomainAgentAvailable(vm, agentForced)) @@ -2094,21 +2091,20 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, - QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) goto endjob; qemuDomainSetFakeReboot(driver, vm, isReboot); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemPowerdown(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -2171,7 +2167,6 @@ qemuDomainReboot(virDomainPtr dom, unsigned int flags) static int qemuDomainReset(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -2185,16 +2180,16 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) if (virDomainResetEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemReset(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; priv->fakeReboot = false; @@ -2204,7 +2199,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_CRASHED); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2258,7 +2253,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, reason == VIR_DOMAIN_PAUSED_STARTING_UP && !priv->beingDestroyed); - if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, + if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, !(flags & VIR_DOMAIN_DESTROY_GRACEFUL)) < 0) goto cleanup; @@ -2289,7 +2284,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, endjob: if (ret == 0) qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(driver, vm); + 
qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2363,7 +2358,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2426,9 +2421,9 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, if (def) { priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); r = qemuMonitorSetBalloon(priv->mon, newmem); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || r < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || r < 0) goto endjob; /* Lack of balloon support is a fatal error */ @@ -2450,7 +2445,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2489,7 +2484,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2506,9 +2501,9 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); r = qemuMonitorSetMemoryStatsPeriod(priv->mon, def->memballoon, period); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (r < 0) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", @@ -2535,7 +2530,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2544,7 +2539,6 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) { - virQEMUDriverPtr driver = domain->conn->privateData; virDomainObjPtr vm = NULL; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -2559,19 +2553,19 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorInjectNMI(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -2585,7 +2579,6 @@ static int qemuDomainSendKey(virDomainPtr domain, int nkeycodes, unsigned int flags) { - virQEMUDriverPtr driver = domain->conn->privateData; virDomainObjPtr vm = NULL; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -2619,19 +2612,19 @@ static int qemuDomainSendKey(virDomainPtr domain, if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - 
qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSendKey(priv->mon, holdtime, keycodes, nkeycodes); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -3323,7 +3316,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SAVE, VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0) goto cleanup; @@ -3417,7 +3410,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virErrorRestore(&save_err); } } - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); if (ret == 0) qemuDomainRemoveInactiveJob(driver, vm); @@ -3763,7 +3756,7 @@ qemuDumpToFd(virQEMUDriverPtr driver, else g_clear_pointer(&jobPriv->current, qemuDomainJobInfoFree); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (dumpformat) { @@ -3774,14 +3767,14 @@ qemuDumpToFd(virQEMUDriverPtr driver, _("unsupported dumpformat '%s' " "for this QEMU binary"), dumpformat); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); return -1; } } ret = qemuMonitorDumpToFd(priv->mon, fd, dumpformat, detach); - if ((qemuDomainObjExitMonitor(driver, vm) < 0) || ret < 0) + if ((qemuDomainObjExitMonitor(vm) < 0) || ret < 0) return -1; if (detach) @@ -3913,8 +3906,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (virDomainCoreDumpWithFormatEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAsyncJob(driver, vm, - QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) goto cleanup; @@ -3961,9 +3953,9 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } else if (((resume && paused) || (flags & VIR_DUMP_RESET)) && virDomainObjIsActive(vm)) { if ((ret == 0) && (flags & VIR_DUMP_RESET)) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemReset(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; } @@ -3981,7 +3973,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } } - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); if (ret == 0 && flags & VIR_DUMP_CRASH) qemuDomainRemoveInactiveJob(driver, vm); @@ -4031,7 +4023,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -4079,12 +4071,12 @@ qemuDomainScreenshot(virDomainPtr dom, qemuSecurityDomainSetPathLabel(driver, vm, tmp, false); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorScreendump(priv->mon, videoAlias, screen, tmp) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (VIR_CLOSE(tmp_fd) < 0) { @@ -4105,7 +4097,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (unlink_tmp) unlink(tmp); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: 
virDomainObjEndAPI(&vm); @@ -4152,8 +4144,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, switch (action) { case VIR_DOMAIN_WATCHDOG_ACTION_DUMP: - if (qemuDomainObjBeginAsyncJob(driver, vm, - QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) { return; @@ -4181,7 +4172,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); } static int @@ -4230,7 +4221,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, bool removeInactive = false; unsigned long flags = VIR_DUMP_MEMORY_ONLY; - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) return; @@ -4296,7 +4287,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); if (removeInactive) qemuDomainRemoveInactiveJob(driver, vm); } @@ -4313,7 +4304,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, VIR_DEBUG("Removing device %s from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4322,7 +4313,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, } if (STRPREFIX(devAlias, "vcpu")) { - qemuDomainRemoveVcpuAlias(driver, vm, devAlias); + qemuDomainRemoveVcpuAlias(vm, devAlias); } else { if (virDomainDefFindDevice(vm->def, devAlias, &dev, true) < 0) goto endjob; @@ -4336,7 +4327,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, devAlias); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -4537,8 +4528,7 @@ syncNicRxFilterMulticast(char *ifname, } static void -processNicRxFilterChangedEvent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +processNicRxFilterChangedEvent(virDomainObjPtr vm, const char *devAlias) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -4552,7 +4542,7 @@ processNicRxFilterChangedEvent(virQEMUDriverPtr driver, "from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4589,9 +4579,9 @@ processNicRxFilterChangedEvent(virQEMUDriverPtr driver, VIR_DEBUG("process NIC_RX_FILTER_CHANGED event for network " "device %s in domain %s", def->info.alias, vm->def->name); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorQueryRxFilter(priv->mon, devAlias, &guestFilter); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) goto endjob; @@ -4634,7 +4624,7 @@ processNicRxFilterChangedEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virNetDevRxFilterFree(hostFilter); @@ -4680,7 +4670,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, memset(&dev, 0, sizeof(dev)); } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4721,13 +4711,12 @@ processSerialChangedEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } static void -processBlockJobEvent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +processBlockJobEvent(virDomainObjPtr vm, const char 
*diskAlias, int type, int status) @@ -4735,7 +4724,7 @@ processBlockJobEvent(virQEMUDriverPtr driver, virDomainDiskDefPtr disk; g_autoptr(qemuBlockJobData) job = NULL; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4760,16 +4749,15 @@ processBlockJobEvent(virQEMUDriverPtr driver, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } static void -processJobStatusChangeEvent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +processJobStatusChangeEvent(virDomainObjPtr vm, qemuBlockJobDataPtr job) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4780,7 +4768,7 @@ processJobStatusChangeEvent(virQEMUDriverPtr driver, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -4795,7 +4783,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, unsigned int stopFlags = 0; virObjectEventPtr event = NULL; - if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, true) < 0) + if (qemuProcessBeginStopJob(vm, QEMU_JOB_DESTROY, true) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4826,7 +4814,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, endjob: qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } @@ -4924,20 +4912,19 @@ static void qemuProcessEventHandler(void *data, void *opaque) processDeviceDeletedEvent(driver, vm, processEvent->data); break; case QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED: - processNicRxFilterChangedEvent(driver, vm, processEvent->data); + processNicRxFilterChangedEvent(vm, processEvent->data); break; case QEMU_PROCESS_EVENT_SERIAL_CHANGED: processSerialChangedEvent(driver, vm, processEvent->data, processEvent->action); break; case QEMU_PROCESS_EVENT_BLOCK_JOB: - processBlockJobEvent(driver, vm, - processEvent->data, + processBlockJobEvent(vm, processEvent->data, processEvent->action, processEvent->status); break; case QEMU_PROCESS_EVENT_JOB_STATUS_CHANGE: - processJobStatusChangeEvent(driver, vm, processEvent->data); + processJobStatusChangeEvent(vm, processEvent->data); break; case QEMU_PROCESS_EVENT_MONITOR_EOF: processMonitorEOFEvent(driver, vm); @@ -5081,10 +5068,10 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (useAgent) { - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; } else { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; } @@ -5103,7 +5090,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (useAgent) qemuDomainObjEndAgentJob(vm); else - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -5226,7 +5213,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5265,7 +5252,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -5350,7 +5337,7 @@ 
qemuDomainPinEmulator(virDomainPtr dom, if (virDomainPinEmulatorEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5417,7 +5404,7 @@ qemuDomainPinEmulator(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: if (cgroup_emulator) @@ -5512,7 +5499,6 @@ qemuDomainGetVcpus(virDomainPtr dom, static int qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; virDomainDefPtr def; int ret = -1; @@ -5536,7 +5522,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) goto cleanup; if (flags & VIR_DOMAIN_VCPU_GUEST) { - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -5593,16 +5579,15 @@ qemuDomainGetMaxVcpus(virDomainPtr dom) static int -qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetIOThreadsMon(virDomainObjPtr vm, qemuMonitorIOThreadInfoPtr **iothreads) { qemuDomainObjPrivatePtr priv = vm->privateData; int niothreads = 0; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); niothreads = qemuMonitorGetIOThreads(priv->mon, iothreads); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || niothreads < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || niothreads < 0) return -1; return niothreads; @@ -5610,8 +5595,7 @@ qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver, static int -qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetIOThreadsLive(virDomainObjPtr vm, virDomainIOThreadInfoPtr **info) { qemuDomainObjPrivatePtr priv; @@ -5621,7 +5605,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -5637,7 +5621,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, goto endjob; } - if ((niothreads = qemuDomainGetIOThreadsMon(driver, vm, &iothreads)) < 0) + if ((niothreads = qemuDomainGetIOThreadsMon(vm, &iothreads)) < 0) goto endjob; /* Nothing to do */ @@ -5671,7 +5655,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver, ret = niothreads; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: if (info_ret) { @@ -5753,7 +5737,6 @@ qemuDomainGetIOThreadInfo(virDomainPtr dom, virDomainIOThreadInfoPtr **info, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; virDomainDefPtr targetDef = NULL; int ret = -1; @@ -5771,7 +5754,7 @@ qemuDomainGetIOThreadInfo(virDomainPtr dom, goto cleanup; if (!targetDef) - ret = qemuDomainGetIOThreadsLive(driver, vm, info); + ret = qemuDomainGetIOThreadsLive(vm, info); else ret = qemuDomainGetIOThreadsConfig(targetDef, info); @@ -5815,7 +5798,7 @@ qemuDomainPinIOThread(virDomainPtr dom, if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5904,7 +5887,7 @@ qemuDomainPinIOThread(virDomainPtr dom, ret = 0; endjob: - 
qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: if (cgroup_iothread) @@ -5916,8 +5899,7 @@ qemuDomainPinIOThread(virDomainPtr dom, } static int -qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainHotplugAddIOThread(virDomainObjPtr vm, unsigned int iothread_id) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5937,7 +5919,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, if (qemuMonitorCreateObjectProps(&props, "iothread", alias, NULL) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorAddObject(priv->mon, &props, NULL) < 0) goto exit_monitor; @@ -5952,7 +5934,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, &new_iothreads)) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (new_niothreads != exp_niothreads) { @@ -6001,14 +5983,13 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, return ret; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } static int -qemuDomainHotplugModIOThread(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainHotplugModIOThread(virDomainObjPtr vm, qemuMonitorIOThreadInfo iothread) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -6020,11 +6001,11 @@ qemuDomainHotplugModIOThread(virQEMUDriverPtr driver, return -1; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetIOThread(priv->mon, &iothread); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc < 0) @@ -6035,8 +6016,7 @@ qemuDomainHotplugModIOThread(virQEMUDriverPtr driver, static int -qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainHotplugDelIOThread(virDomainObjPtr vm, unsigned int iothread_id) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -6052,7 +6032,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, if (!(alias = g_strdup_printf("iothread%u", iothread_id))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDelObject(priv->mon, alias, true); exp_niothreads--; @@ -6063,7 +6043,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, &new_iothreads)) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (new_niothreads != exp_niothreads) { @@ -6093,7 +6073,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver, return ret; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } @@ -6268,7 +6248,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -6286,7 +6266,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, if (qemuDomainAddIOThreadCheck(def, iothread.iothread_id) < 0) goto endjob; - if (qemuDomainHotplugAddIOThread(driver, vm, iothread.iothread_id) < 0) + if (qemuDomainHotplugAddIOThread(vm, iothread.iothread_id) < 0) goto endjob; break; @@ -6295,7 +6275,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, if (qemuDomainDelIOThreadCheck(def, iothread.iothread_id) < 0) goto endjob; - if (qemuDomainHotplugDelIOThread(driver, vm, 
iothread.iothread_id) < 0) + if (qemuDomainHotplugDelIOThread(vm, iothread.iothread_id) < 0) goto endjob; break; @@ -6308,7 +6288,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, goto endjob; } - if (qemuDomainHotplugModIOThread(driver, vm, iothread) < 0) + if (qemuDomainHotplugModIOThread(vm, iothread) < 0) goto endjob; break; @@ -6355,7 +6335,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -7047,14 +7027,14 @@ qemuDomainRestoreFlags(virConnectPtr conn, priv->hookRun = true; } - if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_RESTORE, flags) < 0) goto cleanup; ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, data, path, false, QEMU_ASYNC_JOB_START); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: virDomainDefFree(def); @@ -7576,7 +7556,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags) if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) goto cleanup; @@ -7594,7 +7574,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags) ret = 0; endjob: - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -7724,7 +7704,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, if (virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!vm->persistent) { @@ -7820,7 +7800,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -7853,7 +7833,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, break; case VIR_DOMAIN_DEVICE_CONTROLLER: - ret = qemuDomainAttachControllerDevice(driver, vm, dev->data.controller); + ret = qemuDomainAttachControllerDevice(vm, dev->data.controller); if (!ret) { alias = dev->data.controller->info.alias; dev->data.controller = NULL; @@ -7940,7 +7920,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, break; case VIR_DOMAIN_DEVICE_INPUT: - ret = qemuDomainAttachInputDevice(driver, vm, dev->data.input); + ret = qemuDomainAttachInputDevice(vm, dev->data.input); if (ret == 0) { alias = dev->data.input->info.alias; dev->data.input = NULL; @@ -7948,7 +7928,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, break; case VIR_DOMAIN_DEVICE_VSOCK: - ret = qemuDomainAttachVsockDevice(driver, vm, dev->data.vsock); + ret = qemuDomainAttachVsockDevice(vm, dev->data.vsock); if (ret == 0) { alias = dev->data.vsock->info.alias; dev->data.vsock = NULL; @@ -7984,7 +7964,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, } if (ret == 0) - ret = qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE); + ret = qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE); return ret; } @@ -8076,7 +8056,7 @@ qemuDomainUpdateDeviceLive(virDomainObjPtr vm, return -1; } - ret = qemuDomainChangeNet(driver, vm, dev); + ret = qemuDomainChangeNet(vm, dev); break; case VIR_DOMAIN_DEVICE_FS: @@ -8792,7 +8772,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, 
QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8804,7 +8784,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -8849,7 +8829,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8918,7 +8898,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainDefFree(vmdef); @@ -8988,7 +8968,7 @@ qemuDomainDetachDeviceLiveAndConfig(virQEMUDriverPtr driver, if ((rc = qemuDomainDetachDeviceLive(vm, dev_copy, driver, false)) < 0) goto cleanup; - if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; /* @@ -9071,7 +9051,7 @@ qemuDomainDetachDeviceAliasLiveAndConfig(virQEMUDriverPtr driver, if ((rc = qemuDomainDetachDeviceLive(vm, &dev, driver, true)) < 0) goto cleanup; - if (rc == 0 && qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (rc == 0 && qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; } @@ -9104,7 +9084,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -9116,7 +9096,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9139,7 +9119,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -9151,7 +9131,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9212,7 +9192,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, autostart = (autostart != 0); if (vm->autostart != autostart) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name))) @@ -9250,7 +9230,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, vm->autostart = autostart; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } ret = 0; @@ -9358,7 +9338,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9392,7 +9372,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9534,7 +9514,7 @@ 
qemuDomainSetMemoryParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; /* QEMU and LXC implementation are identical */ @@ -9565,7 +9545,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -9788,7 +9768,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, } } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9843,7 +9823,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virBitmapFree(nodeset); @@ -9997,7 +9977,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10039,7 +10019,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -10052,7 +10032,6 @@ qemuDomainGetPerfEvents(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuDomainObjPrivatePtr priv; virDomainDefPtr def; @@ -10072,7 +10051,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(def = virDomainObjGetOneDef(vm, flags))) @@ -10101,7 +10080,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -10275,7 +10254,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10509,7 +10488,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainDefFree(persistentDefCopy); @@ -10775,7 +10754,6 @@ qemuDomainBlockResize(virDomainPtr dom, unsigned long long size, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; int ret = -1; @@ -10804,7 +10782,7 @@ qemuDomainBlockResize(virDomainPtr dom, if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10838,18 +10816,18 @@ qemuDomainBlockResize(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorBlockResize(priv->mon, device, nodename, size) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 
0) goto endjob; ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -10874,7 +10852,6 @@ qemuDomainBlockStatsGatherTotals(qemuBlockStatsPtr data, /** * qemuDomainBlocksStatsGather: - * @driver: driver object * @vm: domain object * @path: to gather the statistics for * @capacity: refresh capacity of the backing image @@ -10885,8 +10862,7 @@ qemuDomainBlockStatsGatherTotals(qemuBlockStatsPtr data, * Returns -1 on error; number of filled block statistics on success. */ static int -qemuDomainBlocksStatsGather(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainBlocksStatsGather(virDomainObjPtr vm, const char *path, bool capacity, qemuBlockStatsPtr *retstats) @@ -10921,7 +10897,7 @@ qemuDomainBlocksStatsGather(virQEMUDriverPtr driver, } } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); nstats = qemuMonitorGetAllBlockStatsInfo(priv->mon, &blockstats, false); if (capacity && nstats >= 0) { @@ -10931,7 +10907,7 @@ qemuDomainBlocksStatsGather(virQEMUDriverPtr driver, rc = qemuMonitorBlockStatsUpdateCapacity(priv->mon, blockstats, false); } - if (qemuDomainObjExitMonitor(driver, vm) < 0 || nstats < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || nstats < 0 || rc < 0) goto cleanup; if (VIR_ALLOC(*retstats) < 0) @@ -10993,7 +10969,6 @@ qemuDomainBlockStats(virDomainPtr dom, const char *path, virDomainBlockStatsPtr stats) { - virQEMUDriverPtr driver = dom->conn->privateData; qemuBlockStatsPtr blockstats = NULL; int ret = -1; virDomainObjPtr vm; @@ -11004,13 +10979,13 @@ qemuDomainBlockStats(virDomainPtr dom, if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (qemuDomainBlocksStatsGather(driver, vm, path, false, &blockstats) < 0) + if (qemuDomainBlocksStatsGather(vm, path, false, &blockstats) < 0) goto endjob; if (VIR_ASSIGN_IS_OVERFLOW(stats->rd_req, blockstats->rd_req) || @@ -11027,7 +11002,7 @@ qemuDomainBlockStats(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -11043,7 +11018,6 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuBlockStatsPtr blockstats = NULL; int nstats; @@ -11062,13 +11036,13 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if ((nstats = qemuDomainBlocksStatsGather(driver, vm, path, false, + if ((nstats = qemuDomainBlocksStatsGather(vm, path, false, &blockstats)) < 0) goto endjob; @@ -11115,7 +11089,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, *nparams = nstats; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(blockstats); @@ -11207,7 +11181,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -11381,7 +11355,7 
@@ qemuDomainSetInterfaceParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virNetDevBandwidthFree(bandwidth); @@ -11501,8 +11475,7 @@ qemuDomainGetInterfaceParameters(virDomainPtr dom, /* This functions assumes that job QEMU_JOB_QUERY is started by a caller */ static int -qemuDomainMemoryStatsInternal(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainMemoryStatsInternal(virDomainObjPtr vm, virDomainMemoryStatPtr stats, unsigned int nr_stats) @@ -11514,10 +11487,10 @@ qemuDomainMemoryStatsInternal(virQEMUDriverPtr driver, return -1; if (virDomainDefHasMemballoon(vm->def)) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetMemoryStats(qemuDomainGetMonitor(vm), vm->def->memballoon, stats, nr_stats); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0 || ret >= nr_stats) @@ -11544,7 +11517,6 @@ qemuDomainMemoryStats(virDomainPtr dom, unsigned int nr_stats, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -11556,12 +11528,12 @@ qemuDomainMemoryStats(virDomainPtr dom, if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; - ret = qemuDomainMemoryStatsInternal(driver, vm, stats, nr_stats); + ret = qemuDomainMemoryStatsInternal(vm, stats, nr_stats); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -11661,7 +11633,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -11680,19 +11652,19 @@ qemuDomainMemoryPeek(virDomainPtr dom, qemuSecurityDomainSetPathLabel(driver, vm, tmp, false); priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (flags == VIR_MEMORY_VIRTUAL) { if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } } else { if (qemuMonitorSavePhysicalMemory(priv->mon, offset, size, tmp) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto endjob; } } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; /* Read the memory file into buffer. 
*/ @@ -11706,7 +11678,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FORCE_CLOSE(fd); @@ -11941,7 +11913,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(disk = virDomainDiskByName(vm->def, path, false))) { @@ -11970,7 +11942,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, goto endjob; } - if (qemuDomainBlocksStatsGather(driver, vm, path, true, &entry) < 0) + if (qemuDomainBlocksStatsGather(vm, path, true, &entry) < 0) goto endjob; if (!entry->wr_highest_offset_valid) { @@ -12013,7 +11985,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(entry); virDomainObjEndAPI(&vm); @@ -13494,8 +13466,7 @@ qemuConnectBaselineHypervisorCPU(virConnectPtr conn, static int -qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetJobInfoMigrationStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -13507,13 +13478,13 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver, jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { if (events && jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && - qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_NONE, jobInfo, NULL) < 0) return -1; if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION && - qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_NONE, jobInfo) < 0) return -1; @@ -13526,20 +13497,19 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver, static int -qemuDomainGetJobInfoDumpStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetJobInfoDumpStats(virDomainObjPtr vm, qemuDomainJobInfoPtr jobInfo) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorDumpStats stats = { 0 }; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; rc = qemuMonitorQueryDump(priv->mon, &stats); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; jobInfo->stats.dump = stats; @@ -13577,8 +13547,7 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriverPtr driver, static int -qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetJobStatsInternal(virDomainObjPtr vm, bool completed, qemuDomainJobInfoPtr *jobInfo) { @@ -13602,7 +13571,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, return -1; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -13617,17 +13586,17 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, switch ((*jobInfo)->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobInfo) < 0) + if (qemuDomainGetJobInfoMigrationStats(vm, *jobInfo) < 0) goto cleanup; break; case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - if (qemuDomainGetJobInfoDumpStats(driver, vm, 
*jobInfo) < 0) + if (qemuDomainGetJobInfoDumpStats(vm, *jobInfo) < 0) goto cleanup; break; case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - if (qemuBackupGetJobInfoStats(driver, vm, *jobInfo) < 0) + if (qemuBackupGetJobInfoStats(vm, *jobInfo) < 0) goto cleanup; break; @@ -13638,7 +13607,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, ret = 0; cleanup: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -13647,7 +13616,6 @@ static int qemuDomainGetJobInfo(virDomainPtr dom, virDomainJobInfoPtr info) { - virQEMUDriverPtr driver = dom->conn->privateData; g_autoptr(qemuDomainJobInfo) jobInfo = NULL; virDomainObjPtr vm; int ret = -1; @@ -13660,7 +13628,7 @@ qemuDomainGetJobInfo(virDomainPtr dom, if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobInfo) < 0) + if (qemuDomainGetJobStatsInternal(vm, false, &jobInfo) < 0) goto cleanup; if (!jobInfo || @@ -13684,7 +13652,6 @@ qemuDomainGetJobStats(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; qemuDomainJobPrivatePtr jobPriv; @@ -13703,7 +13670,7 @@ qemuDomainGetJobStats(virDomainPtr dom, priv = vm->privateData; jobPriv = priv->job.privateData; - if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) + if (qemuDomainGetJobStatsInternal(vm, completed, &jobInfo) < 0) goto cleanup; if (!jobInfo || @@ -13735,9 +13702,9 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) VIR_DEBUG("Cancelling migration job at client request"); qemuDomainObjAbortAsyncJob(vm); - qemuDomainObjEnterMonitor(priv->driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateCancel(priv->mon); - if (qemuDomainObjExitMonitor(priv->driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -13746,7 +13713,6 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) static int qemuDomainAbortJob(virDomainPtr dom) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -13759,7 +13725,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_ABORT) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13828,7 +13794,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -13841,7 +13807,6 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, unsigned long long downtime, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; g_autoptr(qemuMigrationParams) migParams = NULL; @@ -13856,7 +13821,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13875,20 +13840,20 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, downtime) < 0) goto endjob; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + 
qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetMigrationDowntime(priv->mon, downtime); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -13901,7 +13866,6 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, unsigned long long *downtime, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuMigrationParamsPtr migParams = NULL; int ret = -1; @@ -13915,13 +13879,13 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, &migParams) < 0) goto endjob; @@ -13941,7 +13905,7 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: qemuMigrationParamsFree(migParams); @@ -13955,7 +13919,6 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, unsigned long long *cacheSize, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; g_autoptr(qemuMigrationParams) migParams = NULL; @@ -13970,7 +13933,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13986,7 +13949,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, } if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_XBZRLE_CACHE_SIZE)) { - if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, &migParams) < 0) goto endjob; @@ -13995,16 +13958,16 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, cacheSize) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorGetMigrationCacheSize(priv->mon, cacheSize); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14016,7 +13979,6 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, unsigned long long cacheSize, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; g_autoptr(qemuMigrationParams) migParams = NULL; @@ -14031,7 +13993,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14056,20 +14018,20 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, cacheSize) < 0) goto endjob; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, migParams) < 
0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetMigrationCacheSize(priv->mon, cacheSize); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14081,7 +14043,6 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, unsigned long bandwidth, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; bool postcopy = !!(flags & VIR_DOMAIN_MIGRATE_MAX_SPEED_POSTCOPY); @@ -14117,7 +14078,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14142,15 +14103,15 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, bandwidth * 1024 * 1024) < 0) goto endjob; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_NONE, migParams) < 0) goto endjob; } else { int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; } @@ -14160,7 +14121,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14169,8 +14130,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, static int -qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, unsigned long *bandwidth) { g_autoptr(qemuMigrationParams) migParams = NULL; @@ -14178,13 +14138,13 @@ qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriverPtr driver, int rc; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) goto cleanup; - if (qemuMigrationParamsFetch(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuMigrationParamsFetch(vm, QEMU_ASYNC_JOB_NONE, &migParams) < 0) goto cleanup; @@ -14215,7 +14175,7 @@ qemuDomainMigrationGetPostcopyBandwidth(virQEMUDriverPtr driver, ret = 0; cleanup: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -14225,7 +14185,6 @@ qemuDomainMigrateGetMaxSpeed(virDomainPtr dom, unsigned long *bandwidth, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; bool postcopy = !!(flags & VIR_DOMAIN_MIGRATE_MAX_SPEED_POSTCOPY); @@ -14242,7 +14201,7 @@ qemuDomainMigrateGetMaxSpeed(virDomainPtr dom, goto cleanup; if (postcopy) { - if (qemuDomainMigrationGetPostcopyBandwidth(driver, vm, bandwidth) < 0) + if (qemuDomainMigrationGetPostcopyBandwidth(vm, bandwidth) < 0) goto cleanup; } else { *bandwidth = priv->migMaxBandwidth; @@ -14260,7 +14219,6 @@ static int qemuDomainMigrateStartPostCopy(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuDomainObjPrivatePtr priv; int ret = -1; @@ -14273,7 +14231,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0) 
goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14296,13 +14254,13 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, } VIR_DEBUG("Starting post-copy"); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateStartPostCopy(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -14518,14 +14476,13 @@ qemuDomainSnapshotCreateActiveInternal(virQEMUDriverPtr driver, } } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_SNAPSHOT) < 0) { + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0) { resume = false; goto cleanup; } ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) goto cleanup; @@ -15032,11 +14989,11 @@ qemuDomainSnapshotDiskCleanup(qemuDomainSnapshotDiskDataPtr data, * be set to NULL by qemuDomainSnapshotDiskUpdateSource */ if (data[i].src) { if (data[i].blockdevadded) { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data[i].crdata->srcdata[0]); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } @@ -15103,8 +15060,7 @@ qemuDomainSnapshotDiskBitmapsPropagate(qemuDomainSnapshotDiskDataPtr dd, static int -qemuDomainSnapshotDiskPrepareOneBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainSnapshotDiskPrepareOneBlockdev(virDomainObjPtr vm, qemuDomainSnapshotDiskDataPtr dd, virQEMUDriverConfigPtr cfg, bool reuse, @@ -15130,13 +15086,13 @@ qemuDomainSnapshotDiskPrepareOneBlockdev(virQEMUDriverPtr driver, return -1; if (reuse) { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), dd->crdata->srcdata[0]); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; } else { if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData, @@ -15244,7 +15200,7 @@ qemuDomainSnapshotDiskPrepareOne(virQEMUDriverPtr driver, dd->prepared = true; if (blockdev) { - if (qemuDomainSnapshotDiskPrepareOneBlockdev(driver, vm, dd, cfg, reuse, + if (qemuDomainSnapshotDiskPrepareOneBlockdev(vm, dd, cfg, reuse, blockNamedNodeData, asyncJob) < 0) return -1; @@ -15411,12 +15367,12 @@ qemuDomainSnapshotCreateDiskActive(virQEMUDriverPtr driver, goto cleanup; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorTransaction(priv->mon, &actions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rc = -1; for (i = 0; i < ndiskdata; i++) { @@ -15474,7 +15430,7 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) { int freeze; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) { @@ -15617,7 
+15573,7 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, } if (thaw != 0 && - qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) >= 0 && + qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) >= 0 && virDomainObjIsActive(vm)) { if (qemuDomainSnapshotFSThaw(vm, ret == 0 && thaw > 0) < 0) { /* helper reported the error, if it was needed */ @@ -15777,7 +15733,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain, * a regular job, so we need to set the job mask to disallow query as * 'savevm' blocks the monitor. External snapshot will then modify the * job mask appropriately. */ - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SNAPSHOT, VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0) goto cleanup; @@ -15915,7 +15871,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain, virDomainSnapshotObjListRemove(vm->snapshots, snap); } - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -16371,8 +16327,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, goto cleanup; } - if (qemuProcessBeginJob(driver, vm, - VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT, + if (qemuProcessBeginJob(vm, VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT, flags) < 0) goto cleanup; @@ -16544,11 +16499,10 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, } } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_START) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_START) < 0) goto endjob; rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) { /* XXX resume domain if it was running before the @@ -16665,7 +16619,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, if (qemuDomainSnapshotRevertInactive(driver, vm, snap) < 0) { qemuDomainRemoveInactive(driver, vm); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); goto cleanup; } @@ -16690,7 +16644,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, virDomainAuditStart(vm, "from-snapshot", rc >= 0); if (rc < 0) { qemuDomainRemoveInactive(driver, vm); - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); goto cleanup; } detail = VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT; @@ -16727,7 +16681,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, ret = 0; endjob: - qemuProcessEndJob(driver, vm); + qemuProcessEndJob(vm); cleanup: if (ret == 0) { @@ -16821,7 +16775,7 @@ qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot, if (virDomainSnapshotDeleteEnsureACL(snapshot->domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot))) @@ -16894,7 +16848,7 @@ qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -17147,7 +17101,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, if (virDomainQemuMonitorCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17159,13 +17113,13 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, hmp = !!(flags & 
VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -17327,8 +17281,7 @@ qemuDomainOpenChannel(virDomainPtr dom, * abort with pivot; this updates the VM definition as appropriate, on * either success or failure. */ static int -qemuDomainBlockPivot(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainBlockPivot(virDomainObjPtr vm, qemuBlockJobDataPtr job, virDomainDiskDefPtr disk) { @@ -17418,7 +17371,7 @@ qemuDomainBlockPivot(virQEMUDriverPtr driver, break; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (blockdev) { int rc = 0; @@ -17438,7 +17391,7 @@ qemuDomainBlockPivot(virQEMUDriverPtr driver, } else { ret = qemuMonitorDrivePivot(priv->mon, job->name); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; /* The pivot failed. The block job in QEMU remains in the synchronised state */ @@ -17463,7 +17416,6 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, unsigned int flags) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; const char *device = NULL; const char *jobname = NULL; virDomainDiskDefPtr disk; @@ -17485,7 +17437,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, goto cleanup; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17563,7 +17515,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, device = job->name; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (!blockdev && baseSource) basePath = qemuMonitorDiskNameLookup(priv->mon, device, disk->src, baseSource); @@ -17572,7 +17524,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, (!baseSource || basePath)) ret = qemuMonitorBlockStream(priv->mon, device, jobname, persistjob, basePath, nodebase, backingPath, speed); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -17581,7 +17533,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, qemuBlockJobStarted(job, vm); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: qemuBlockJobStartupFinalize(vm, job); @@ -17615,7 +17567,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17645,15 +17597,15 @@ qemuDomainBlockJobAbort(virDomainPtr dom, qemuBlockJobSyncBegin(job); if (pivot) { - if ((ret = qemuDomainBlockPivot(driver, vm, job, disk)) < 0) + if ((ret = qemuDomainBlockPivot(vm, job, disk)) < 0) goto endjob; } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (blockdev) ret = qemuMonitorJobCancel(priv->mon, job->name, false); else ret = qemuMonitorBlockJobCancel(priv->mon, job->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -1; goto endjob; } @@ -17698,7 +17650,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, endjob: if (job && !async) qemuBlockJobSyncEnd(vm, job, 
QEMU_ASYNC_JOB_NONE); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -17760,7 +17712,6 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, virDomainBlockJobInfoPtr info, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; virDomainDiskDefPtr disk; int ret = -1; @@ -17776,7 +17727,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17790,9 +17741,9 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetBlockJobInfo(qemuDomainGetMonitor(vm), job->name, &rawInfo); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret <= 0) goto endjob; @@ -17804,7 +17755,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -17818,7 +17769,6 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, unsigned long bandwidth, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainDiskDefPtr disk; int ret = -1; virDomainObjPtr vm; @@ -17844,7 +17794,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17859,15 +17809,15 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorBlockJobSetSpeed(qemuDomainGetMonitor(vm), job->name, speed); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -18046,7 +17996,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -18254,9 +18204,9 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, } if (data) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuBlockStorageSourceChainAttach(priv->mon, data); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) @@ -18275,7 +18225,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE; /* Actually start the mirroring */ - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (blockdev) { ret = qemuMonitorBlockdevMirror(priv->mon, job->name, true, @@ -18291,7 +18241,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, } virDomainAuditDisk(vm, NULL, mirror, "mirror", ret >= 0); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) { qemuDomainStorageSourceChainAccessRevoke(driver, vm, mirror); @@ -18309,12 +18259,12 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (ret < 0 && virDomainObjIsActive(vm)) { if (data || crdata) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (data) 
qemuBlockStorageSourceChainDetach(priv->mon, data); if (crdata) qemuBlockStorageSourceAttachRollback(priv->mon, crdata->srcdata[0]); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } if (need_revoke) qemuDomainStorageSourceChainAccessRevoke(driver, vm, mirror); @@ -18322,7 +18272,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (need_unlink && virStorageFileUnlink(mirror) < 0) VIR_WARN("%s", _("unable to remove just-created copy target")); virStorageFileDeinit(mirror); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); qemuBlockJobStartupFinalize(vm, job); return ret; @@ -18546,7 +18496,7 @@ qemuDomainBlockCommit(virDomainPtr dom, if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -18723,7 +18673,7 @@ qemuDomainBlockCommit(virDomainPtr dom, device = job->name; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (!blockdev) { basePath = qemuMonitorDiskNameLookup(priv->mon, device, disk->src, @@ -18737,7 +18687,7 @@ qemuDomainBlockCommit(virDomainPtr dom, topPath, nodetop, basePath, nodebase, backingPath, speed); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || ret < 0) { + if (qemuDomainObjExitMonitor(vm) < 0 || ret < 0) { ret = -1; goto endjob; } @@ -18762,7 +18712,7 @@ qemuDomainBlockCommit(virDomainPtr dom, virErrorRestore(&orig_err); } qemuBlockJobStartupFinalize(vm, job); - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -18789,7 +18739,7 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -18827,14 +18777,14 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) goto endjob; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, fd, "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH) != 0); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -18902,14 +18852,14 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom, if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); if (ret < 0) goto cleanup; @@ -19148,7 +19098,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, cfg = virQEMUDriverGetConfig(driver); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; priv = vm->privateData; @@ -19359,12 +19309,12 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, /* NB: Let's let QEMU 
decide how to handle issues with _length * via the JSON error code from the block_set_io_throttle call */ - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSetBlockIoThrottle(priv->mon, drivealias, qdevid, &info, supportMaxOptions, set_fields & QEMU_BLOCK_IOTUNE_SET_GROUP_NAME, supportMaxLengthOptions); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) goto endjob; @@ -19414,7 +19364,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(info.group_name); @@ -19433,7 +19383,6 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, unsigned int flags) { virDomainDiskDefPtr disk; - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuDomainObjPrivatePtr priv = NULL; virDomainDefPtr def = NULL; @@ -19459,7 +19408,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; /* the API check guarantees that only one of the definitions will be set */ @@ -19501,9 +19450,9 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (!(drivealias = qemuAliasDiskDriveFromDisk(disk))) goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetBlockIoThrottle(priv->mon, drivealias, qdevid, &reply); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (ret < 0) goto endjob; @@ -19572,7 +19521,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: VIR_FREE(reply.group_name); @@ -19586,7 +19535,6 @@ qemuDomainGetDiskErrors(virDomainPtr dom, unsigned int nerrors, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuDomainObjPrivatePtr priv; virHashTablePtr table = NULL; @@ -19606,7 +19554,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19617,9 +19565,9 @@ qemuDomainGetDiskErrors(virDomainPtr dom, goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); table = qemuMonitorGetBlockInfo(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (!table) goto endjob; @@ -19647,7 +19595,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, ret = n; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -19683,7 +19631,7 @@ qemuDomainSetMetadata(virDomainPtr dom, if (virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; ret = virDomainObjSetMetadata(vm, type, metadata, key, uri, @@ -19696,7 +19644,7 @@ qemuDomainSetMetadata(virDomainPtr dom, virObjectEventStateQueue(driver->domainEventState, ev); } - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -19776,17 +19724,16 @@ qemuDomainGetCPUStats(virDomainPtr domain, static int 
-qemuDomainProbeQMPCurrentMachine(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainProbeQMPCurrentMachine(virDomainObjPtr vm, bool *wakeupSupported) { qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorCurrentMachineInfo info = { 0 }; int rv; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rv = qemuMonitorGetCurrentMachineInfo(priv->mon, &info); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; @@ -19797,8 +19744,7 @@ qemuDomainProbeQMPCurrentMachine(virQEMUDriverPtr driver, /* returns -1 on error, or if query is not supported, 0 if query was successful */ static int -qemuDomainQueryWakeupSuspendSupport(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, bool *wakeupSupported) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -19807,29 +19753,28 @@ qemuDomainQueryWakeupSuspendSupport(virQEMUDriverPtr driver, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE)) return -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) goto endjob; - ret = qemuDomainProbeQMPCurrentMachine(driver, vm, wakeupSupported); + ret = qemuDomainProbeQMPCurrentMachine(vm, wakeupSupported); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } static int -qemuDomainPMSuspendAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainPMSuspendAgent(virDomainObjPtr vm, unsigned int target) { qemuAgentPtr agent; int ret = -1; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -19854,7 +19799,6 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom, unsigned long long duration, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; bool wakeupSupported; @@ -19889,7 +19833,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom, * that don't know about this cap, will keep their old behavior of * suspending 'in the dark'. 
*/ - if (qemuDomainQueryWakeupSuspendSupport(driver, vm, &wakeupSupported) == 0) { + if (qemuDomainQueryWakeupSuspendSupport(vm, &wakeupSupported) == 0) { if (!wakeupSupported) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("Domain does not have suspend support")); @@ -19914,7 +19858,7 @@ qemuDomainPMSuspendForDuration(virDomainPtr dom, } } - ret = qemuDomainPMSuspendAgent(driver, vm, target); + ret = qemuDomainPMSuspendAgent(vm, target); cleanup: virDomainObjEndAPI(&vm); @@ -19925,7 +19869,6 @@ static int qemuDomainPMWakeup(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; qemuDomainObjPrivatePtr priv; @@ -19938,7 +19881,7 @@ qemuDomainPMWakeup(virDomainPtr dom, if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19946,13 +19889,13 @@ qemuDomainPMWakeup(virDomainPtr dom, priv = vm->privateData; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemWakeup(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -19995,7 +19938,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20070,7 +20013,6 @@ qemuDomainFSTrim(virDomainPtr dom, unsigned long long minimum, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuAgentPtr agent; int ret = -1; @@ -20090,7 +20032,7 @@ qemuDomainFSTrim(virDomainPtr dom, if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -20254,14 +20196,13 @@ qemuConnectGetCPUModelNames(virConnectPtr conn, static int -qemuDomainGetHostnameAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetHostnameAgent(virDomainObjPtr vm, char **hostname) { qemuAgentPtr agent; int ret = -1; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -20282,8 +20223,7 @@ qemuDomainGetHostnameAgent(virQEMUDriverPtr driver, static int -qemuDomainGetHostnameLease(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetHostnameLease(virDomainObjPtr vm, char **hostname) { char macaddr[VIR_MAC_STRING_BUFLEN]; @@ -20293,7 +20233,7 @@ qemuDomainGetHostnameLease(virQEMUDriverPtr driver, size_t i, j; int ret = -1; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -20335,7 +20275,7 @@ qemuDomainGetHostnameLease(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -20344,7 +20284,6 @@ static char * qemuDomainGetHostname(virDomainPtr dom, unsigned int flags) { - virQEMUDriverPtr driver = 
dom->conn->privateData; virDomainObjPtr vm = NULL; char *hostname = NULL; @@ -20365,10 +20304,10 @@ qemuDomainGetHostname(virDomainPtr dom, goto cleanup; if (flags & VIR_DOMAIN_GET_HOSTNAME_AGENT) { - if (qemuDomainGetHostnameAgent(driver, vm, &hostname) < 0) + if (qemuDomainGetHostnameAgent(vm, &hostname) < 0) goto cleanup; } else if (flags & VIR_DOMAIN_GET_HOSTNAME_LEASE) { - if (qemuDomainGetHostnameLease(driver, vm, &hostname) < 0) + if (qemuDomainGetHostnameLease(vm, &hostname) < 0) goto cleanup; } @@ -20391,7 +20330,6 @@ qemuDomainGetTime(virDomainPtr dom, unsigned int *nseconds, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; int ret = -1; @@ -20405,7 +20343,7 @@ qemuDomainGetTime(virDomainPtr dom, if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20433,8 +20371,7 @@ qemuDomainGetTime(virDomainPtr dom, static int -qemuDomainSetTimeAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainSetTimeAgent(virDomainObjPtr vm, long long seconds, unsigned int nseconds, bool rtcSync) @@ -20442,7 +20379,7 @@ qemuDomainSetTimeAgent(virQEMUDriverPtr driver, qemuAgentPtr agent; int ret = -1; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -20467,7 +20404,6 @@ qemuDomainSetTime(virDomainPtr dom, unsigned int nseconds, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; bool rtcSync = flags & VIR_DOMAIN_TIME_SYNC; @@ -20496,10 +20432,10 @@ qemuDomainSetTime(virDomainPtr dom, goto cleanup; } - if (qemuDomainSetTimeAgent(driver, vm, seconds, nseconds, rtcSync) < 0) + if (qemuDomainSetTimeAgent(vm, seconds, nseconds, rtcSync) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20507,9 +20443,9 @@ qemuDomainSetTime(virDomainPtr dom, /* Don't try to call rtc-reset-reinjection if it's not available */ if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_RTC_RESET_REINJECTION)) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rv = qemuMonitorRTCResetReinjection(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rv < 0) @@ -20519,7 +20455,7 @@ qemuDomainSetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -20533,7 +20469,6 @@ qemuDomainFSFreeze(virDomainPtr dom, unsigned int nmountpoints, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -20545,7 +20480,7 @@ qemuDomainFSFreeze(virDomainPtr dom, if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20568,7 +20503,6 @@ qemuDomainFSThaw(virDomainPtr dom, unsigned int nmountpoints, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ 
-20586,7 +20520,7 @@ qemuDomainFSThaw(virDomainPtr dom, if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -21015,7 +20949,7 @@ qemuDomainGetStatsMemory(virQEMUDriverPtr driver, static int -qemuDomainGetStatsBalloon(virQEMUDriverPtr driver, +qemuDomainGetStatsBalloon(virQEMUDriverPtr driver G_GNUC_UNUSED, virDomainObjPtr dom, virTypedParamListPtr params, unsigned int privflags) @@ -21041,7 +20975,7 @@ qemuDomainGetStatsBalloon(virQEMUDriverPtr driver, if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom)) return 0; - nr_stats = qemuDomainMemoryStatsInternal(driver, dom, stats, + nr_stats = qemuDomainMemoryStatsInternal(dom, stats, VIR_DOMAIN_MEMORY_STAT_NR); if (nr_stats < 0) return 0; @@ -21073,7 +21007,7 @@ qemuDomainGetStatsBalloon(virQEMUDriverPtr driver, static int -qemuDomainGetStatsVcpu(virQEMUDriverPtr driver, +qemuDomainGetStatsVcpu(virQEMUDriverPtr driver G_GNUC_UNUSED, virDomainObjPtr dom, virTypedParamListPtr params, unsigned int privflags) @@ -21098,7 +21032,7 @@ qemuDomainGetStatsVcpu(virQEMUDriverPtr driver, goto cleanup; if (HAVE_JOB(privflags) && virDomainObjIsActive(dom) && - qemuDomainRefreshVcpuHalted(driver, dom, QEMU_ASYNC_JOB_NONE) < 0) { + qemuDomainRefreshVcpuHalted(dom, QEMU_ASYNC_JOB_NONE) < 0) { /* it's ok to be silent and go ahead, because halted vcpu info * wasn't here from the beginning */ virResetLastError(); @@ -21519,7 +21453,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver, bool visitBacking = !!(privflags & QEMU_DOMAIN_STATS_BACKING); if (HAVE_JOB(privflags) && virDomainObjIsActive(dom)) { - qemuDomainObjEnterMonitor(driver, dom); + qemuDomainObjEnterMonitor(dom); rc = qemuMonitorGetAllBlockStatsInfo(priv->mon, &stats, visitBacking); @@ -21534,7 +21468,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver, if (fetchnodedata) nodedata = qemuMonitorQueryNamedBlockNodes(priv->mon); - if (qemuDomainObjExitMonitor(driver, dom) < 0) + if (qemuDomainObjExitMonitor(dom) < 0) goto cleanup; /* failure to retrieve stats is fine at this point */ @@ -21573,7 +21507,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver, static int -qemuDomainGetStatsIOThread(virQEMUDriverPtr driver, +qemuDomainGetStatsIOThread(virQEMUDriverPtr driver G_GNUC_UNUSED, virDomainObjPtr dom, virTypedParamListPtr params, unsigned int privflags) @@ -21590,7 +21524,7 @@ qemuDomainGetStatsIOThread(virQEMUDriverPtr driver, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) return 0; - if ((niothreads = qemuDomainGetIOThreadsMon(driver, dom, &iothreads)) < 0) + if ((niothreads = qemuDomainGetIOThreadsMon(dom, &iothreads)) < 0) return -1; /* qemuDomainGetIOThreadsMon returns a NULL-terminated list, so we must free @@ -21837,9 +21771,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, int rv; if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT) - rv = qemuDomainObjBeginJobNowait(driver, vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJobNowait(vm, QEMU_JOB_QUERY); else - rv = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY); if (rv == 0) domflags |= QEMU_DOMAIN_STATS_HAVE_JOB; @@ -21850,7 +21784,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, domflags |= QEMU_DOMAIN_STATS_BACKING; if (qemuDomainGetStats(conn, vm, stats, &tmp, domflags) < 0) { if (HAVE_JOB(domflags) && vm) - qemuDomainObjEndJob(driver, vm); + 
qemuDomainObjEndJob(vm); virObjectUnlock(vm); goto cleanup; @@ -21860,7 +21794,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, tmpstats[nstats++] = tmp; if (HAVE_JOB(domflags)) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); virObjectUnlock(vm); } @@ -21901,15 +21835,13 @@ qemuNodeAllocPages(virConnectPtr conn, } static int -qemuDomainGetFSInfoAgent(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetFSInfoAgent(virDomainObjPtr vm, qemuAgentFSInfoPtr **info) { int ret = -1; qemuAgentPtr agent; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) return ret; if (virDomainObjCheckActive(vm) < 0) @@ -22003,7 +21935,6 @@ qemuDomainGetFSInfo(virDomainPtr dom, virDomainFSInfoPtr **info, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuAgentFSInfoPtr *agentinfo = NULL; int ret = -1; @@ -22017,10 +21948,10 @@ qemuDomainGetFSInfo(virDomainPtr dom, if (virDomainGetFSInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if ((nfs = qemuDomainGetFSInfoAgent(driver, vm, &agentinfo)) < 0) + if ((nfs = qemuDomainGetFSInfoAgent(vm, &agentinfo)) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -22029,7 +21960,7 @@ qemuDomainGetFSInfo(virDomainPtr dom, ret = virDomainFSInfoFormat(agentinfo, nfs, vm->def, info); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: g_free(agentinfo); @@ -22044,7 +21975,6 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, unsigned int source, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; int ret = -1; @@ -22066,7 +21996,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, break; case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT: - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -22104,7 +22034,6 @@ qemuDomainSetUserPassword(virDomainPtr dom, const char *password, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; qemuAgentPtr agent; int ret = -1; @@ -22118,7 +22047,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -22270,7 +22199,7 @@ static int qemuDomainRename(virDomainPtr dom, if (virDomainRenameEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjIsActive(vm)) { @@ -22317,7 +22246,7 @@ static int qemuDomainRename(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -22393,7 +22322,6 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, unsigned int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; qemuAgentCPUInfoPtr info = NULL; @@ -22408,7 +22336,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) 
< 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -22442,7 +22370,6 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, int state, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; virBitmapPtr map = NULL; qemuAgentCPUInfoPtr info = NULL; @@ -22467,7 +22394,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -22560,7 +22487,7 @@ qemuDomainSetVcpu(virDomainPtr dom, if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -22587,7 +22514,7 @@ qemuDomainSetVcpu(virDomainPtr dom, ret = qemuDomainSetVcpuInternal(driver, vm, def, persistentDef, map, !!state); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virBitmapFree(map); @@ -22602,7 +22529,6 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, unsigned long long threshold, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; qemuDomainObjPrivatePtr priv; virDomainObjPtr vm = NULL; virStorageSourcePtr src; @@ -22620,7 +22546,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -22637,7 +22563,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && !src->nodestorage && - qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockNodeNamesDetect(vm, QEMU_ASYNC_JOB_NONE) < 0) goto endjob; if (!src->nodestorage) { @@ -22649,15 +22575,15 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, nodename = g_strdup(src->nodestorage); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSetBlockThreshold(priv->mon, nodename, threshold); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto endjob; ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -22715,7 +22641,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -22746,7 +22672,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virDomainObjEndAPI(&vm); @@ -22827,8 +22753,7 @@ qemuNodeGetSEVInfo(virConnectPtr conn, static int -qemuDomainGetSEVMeasurement(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainGetSEVMeasurement(virDomainObjPtr vm, virTypedParameterPtr *params, int *nparams, unsigned int flags) @@ -22839,13 +22764,13 @@ 
qemuDomainGetSEVMeasurement(virQEMUDriverPtr driver, virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); tmp = qemuMonitorGetSEVMeasurement(QEMU_DOMAIN_PRIVATE(vm)->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (!tmp) @@ -22859,7 +22784,7 @@ qemuDomainGetSEVMeasurement(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); return ret; } @@ -22870,7 +22795,6 @@ qemuDomainGetLaunchSecurityInfo(virDomainPtr domain, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = domain->conn->privateData; virDomainObjPtr vm; int ret = -1; @@ -22881,7 +22805,7 @@ qemuDomainGetLaunchSecurityInfo(virDomainPtr domain, goto cleanup; if (vm->def->sev) { - if (qemuDomainGetSEVMeasurement(driver, vm, params, nparams, flags) < 0) + if (qemuDomainGetSEVMeasurement(vm, params, nparams, flags) < 0) goto cleanup; } @@ -23017,7 +22941,6 @@ qemuDomainGetGuestInfo(virDomainPtr dom, int *nparams, unsigned int flags) { - virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; qemuAgentPtr agent; int ret = -1; @@ -23041,8 +22964,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, if (virDomainGetGuestInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(driver, vm, - QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -23093,7 +23015,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuDomainObjEndAgentJob(vm); if (nfs > 0) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -23104,7 +23026,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuAgentFSInfoFormatParams(agentfsinfo, nfs, vm->def, params, nparams, &maxparams); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } cleanup: diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c index 2c6c30ce03..fda824ad7b 100644 --- a/src/qemu/qemu_hotplug.c +++ b/src/qemu/qemu_hotplug.c @@ -99,14 +99,13 @@ qemuDomainDeleteDevice(virDomainObjPtr vm, const char *alias) { qemuDomainObjPrivatePtr priv = vm->privateData; - virQEMUDriverPtr driver = priv->driver; int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDelDevice(priv->mon, alias); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { /* Domain is no longer running. No cleanup needed. 
*/ return -1; } @@ -231,7 +230,6 @@ qemuHotplugWaitForTrayEject(virDomainObjPtr vm, /** * qemuDomainChangeMediaLegacy: - * @driver: qemu driver structure * @vm: domain definition * @disk: disk definition to change the source of * @newsrc: new disk source to change to @@ -245,8 +243,7 @@ qemuHotplugWaitForTrayEject(virDomainObjPtr vm, * Returns 0 on success, -1 on error and reports libvirt error */ static int -qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeMediaLegacy(virDomainObjPtr vm, virDomainDiskDefPtr disk, virStorageSourcePtr newsrc, bool force) @@ -267,9 +264,9 @@ qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, if (!(driveAlias = qemuAliasDiskDriveFromDisk(disk))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorEjectMedia(priv->mon, driveAlias, force); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; /* If the tray is present wait for it to open. */ @@ -279,9 +276,9 @@ qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, return -1; /* re-issue ejection command to pop out the media */ - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorEjectMedia(priv->mon, driveAlias, false); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; } else { @@ -297,12 +294,12 @@ qemuDomainChangeMediaLegacy(virQEMUDriverPtr driver, if (virStorageSourceGetActualType(newsrc) != VIR_STORAGE_TYPE_DIR) format = virStorageFileFormatTypeToString(newsrc->format); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorChangeMedia(priv->mon, driveAlias, sourcestr, format); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } @@ -343,7 +340,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, if (!(props = qemuBuildDBusVMStateInfoProps(driver, vm))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorAddObject(priv->mon, &props, NULL); @@ -351,7 +348,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, if (ret == 0) priv->dbusVMState = true; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; @@ -370,8 +367,7 @@ qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, * Returns: 0 on success, -1 on error. 
*/ int -qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -380,7 +376,7 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, if (!priv->dbusVMState) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorDelObject(priv->mon, qemuDomainGetDBusVMStateAlias(), true); @@ -388,7 +384,7 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, if (ret == 0) priv->dbusVMState = false; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; @@ -397,7 +393,6 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, /** * qemuHotplugAttachManagedPR: - * @driver: QEMU driver object * @vm: domain object * @src: new disk source to be attached to @vm * @asyncJob: asynchronous job identifier @@ -408,8 +403,7 @@ qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, * Returns: 0 on success, -1 on error. */ static int -qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuHotplugAttachManagedPR(virDomainObjPtr vm, virStorageSourcePtr src, qemuDomainAsyncJob asyncJob) { @@ -431,12 +425,12 @@ qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, daemonStarted = true; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorAddObject(priv->mon, &props, NULL); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; ret = 0; @@ -451,7 +445,6 @@ qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, /** * qemuHotplugRemoveManagedPR: - * @driver: QEMU driver object * @vm: domain object * @asyncJob: asynchronous job identifier * @@ -459,8 +452,7 @@ qemuHotplugAttachManagedPR(virQEMUDriverPtr driver, * it any more. 
*/ static int -qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuHotplugRemoveManagedPR(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -472,11 +464,11 @@ qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ignore_value(qemuMonitorDelObject(priv->mon, qemuDomainGetManagedPRAlias(), false)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; qemuProcessKillManagedPRDaemon(vm); @@ -490,7 +482,6 @@ qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, /** * qemuDomainChangeMediaBlockdev: - * @driver: qemu driver structure * @vm: domain definition * @disk: disk definition to change the source of * @oldsrc: old source definition @@ -505,8 +496,7 @@ qemuHotplugRemoveManagedPR(virQEMUDriverPtr driver, * Returns 0 on success, -1 on error and reports libvirt error */ static int -qemuDomainChangeMediaBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeMediaBlockdev(virDomainObjPtr vm, virDomainDiskDefPtr disk, virStorageSourcePtr oldsrc, virStorageSourcePtr newsrc, @@ -533,16 +523,16 @@ qemuDomainChangeMediaBlockdev(virQEMUDriverPtr driver, } if (diskPriv->tray && disk->tray_status != VIR_DOMAIN_DISK_TRAY_OPEN) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorBlockdevTrayOpen(priv->mon, diskPriv->qomName, force); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; if (!force && qemuHotplugWaitForTrayEject(vm, disk) < 0) return -1; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorBlockdevMediumRemove(priv->mon, diskPriv->qomName); @@ -564,7 +554,7 @@ qemuDomainChangeMediaBlockdev(virQEMUDriverPtr driver, if (rc < 0 && newbackend) qemuBlockStorageSourceChainDetach(priv->mon, newbackend); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; return 0; @@ -628,13 +618,13 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, if (qemuDomainStorageSourceChainAccessAllow(driver, vm, newsrc) < 0) goto cleanup; - if (qemuHotplugAttachManagedPR(driver, vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachManagedPR(vm, newsrc, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) - rc = qemuDomainChangeMediaBlockdev(driver, vm, disk, oldsrc, newsrc, force); + rc = qemuDomainChangeMediaBlockdev(vm, disk, oldsrc, newsrc, force); else - rc = qemuDomainChangeMediaLegacy(driver, vm, disk, newsrc, force); + rc = qemuDomainChangeMediaLegacy(vm, disk, newsrc, force); virDomainAuditDisk(vm, oldsrc, newsrc, "update", rc >= 0); @@ -664,7 +654,7 @@ qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, /* remove PR manager object if unneeded */ if (managedpr) - ignore_value(qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE)); /* revert old image do the disk definition */ if (oldsrc) @@ -726,10 +716,10 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->disks, vm->def->ndisks + 1) < 0) goto cleanup; - if (qemuHotplugAttachManagedPR(driver, vm, disk->src, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugAttachManagedPR(vm, disk->src, 
QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuBlockStorageSourceChainAttach(priv->mon, data) < 0) goto exit_monitor; @@ -764,7 +754,7 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, VIR_WARN("failed to set blkdeviotune for '%s' of '%s'", disk->dst, vm->def->name); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -2; goto cleanup; } @@ -785,11 +775,11 @@ qemuDomainAttachDiskGeneric(virQEMUDriverPtr driver, ignore_value(qemuMonitorBlockdevDel(priv->mon, corAlias)); qemuBlockStorageSourceChainDetach(priv->mon, data); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -2; if (virStorageSourceChainHasManagedPR(disk->src) && - qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0) ret = -2; virDomainAuditDisk(vm, NULL, disk->src, "attach", false); @@ -820,8 +810,7 @@ qemuDomainAttachVirtioDiskDevice(virQEMUDriverPtr driver, } -int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachControllerDevice(virDomainObjPtr vm, virDomainControllerDefPtr controller) { int ret = -1; @@ -869,7 +858,7 @@ int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->controllers, vm->def->ncontrollers+1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if ((ret = qemuDomainAttachExtensionDevice(priv->mon, &controller->info)) < 0) { @@ -880,7 +869,7 @@ int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, ignore_value(qemuDomainDetachExtensionDevice(priv->mon, &controller->info)); exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; ret = -1; goto cleanup; @@ -897,8 +886,7 @@ int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, } static virDomainControllerDefPtr -qemuDomainFindOrCreateSCSIDiskController(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainFindOrCreateSCSIDiskController(virDomainObjPtr vm, int controller) { size_t i; @@ -939,7 +927,7 @@ qemuDomainFindOrCreateSCSIDiskController(virQEMUDriverPtr driver, VIR_INFO("No SCSI controller present, hotplugging one model=%s", virDomainControllerModelSCSITypeToString(cont->model)); - if (qemuDomainAttachControllerDevice(driver, vm, cont) < 0) { + if (qemuDomainAttachControllerDevice(vm, cont) < 0) { VIR_FREE(cont); return NULL; } @@ -985,7 +973,7 @@ qemuDomainAttachSCSIDisk(virQEMUDriverPtr driver, * exist; there must not be any missing index in between. 
*/ for (i = 0; i <= disk->info.addr.drive.controller; i++) { - if (!qemuDomainFindOrCreateSCSIDiskController(driver, vm, i)) + if (!qemuDomainFindOrCreateSCSIDiskController(vm, i)) return -1; } @@ -1391,11 +1379,11 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, slirpfdName))) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (actualType == VIR_DOMAIN_NET_TYPE_VHOSTUSER) { if (qemuMonitorAttachCharDev(priv->mon, charDevAlias, net->data.vhostuser) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto cleanup; } @@ -1406,13 +1394,13 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, tapfd, tapfdName, tapfdSize, vhostfd, vhostfdName, vhostfdSize, slirpfd, slirpfdName) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } netdevPlugged = true; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; for (i = 0; i < tapfdSize; i++) @@ -1424,21 +1412,21 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, queueSize, priv->qemuCaps))) goto try_remove; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuDomainAttachExtensionDevice(priv->mon, &net->info) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } if (qemuMonitorAddDevice(priv->mon, nicstr) < 0) { ignore_value(qemuDomainDetachExtensionDevice(priv->mon, &net->info)); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; /* set link state */ @@ -1447,15 +1435,15 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("device alias not found: cannot set link state to down")); } else { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorSetLink(priv->mon, net->info.alias, VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virDomainAuditNet(vm, NULL, net, "attach", false); goto try_remove; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; } /* link set to down */ @@ -1528,7 +1516,7 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, netdev_name = g_strdup_printf("host%s", net->info.alias); if (QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp) qemuSlirpStop(QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp, vm, driver, net); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (charDevPlugged && qemuMonitorDetachCharDev(priv->mon, charDevAlias) < 0) VIR_WARN("Failed to remove associated chardev %s", charDevAlias); @@ -1536,7 +1524,7 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver, qemuMonitorRemoveNetdev(priv->mon, netdev_name) < 0) VIR_WARN("Failed to remove network backend for netdev %s", netdev_name); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&originalError); goto cleanup; } @@ -1634,7 +1622,7 @@ 
qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, if (!(devstr = qemuBuildPCIHostdevDevStr(vm->def, hostdev, 0, priv->qemuCaps))) goto error; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if ((ret = qemuDomainAttachExtensionDevice(priv->mon, hostdev->info)) < 0) goto exit_monitor; @@ -1643,7 +1631,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, ignore_value(qemuDomainDetachExtensionDevice(priv->mon, hostdev->info)); exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto error; virDomainAuditHostdev(vm, hostdev, "attach", ret == 0); @@ -1676,8 +1664,7 @@ qemuDomainAttachHostPCIDevice(virQEMUDriverPtr driver, void -qemuDomainDelTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainDelTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, const char *secAlias, const char *tlsAlias) @@ -1690,7 +1677,7 @@ qemuDomainDelTLSObjects(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; if (tlsAlias) @@ -1699,7 +1686,7 @@ qemuDomainDelTLSObjects(virQEMUDriverPtr driver, if (secAlias) ignore_value(qemuMonitorDelObject(priv->mon, secAlias, false)); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); cleanup: virErrorRestore(&orig_err); @@ -1707,8 +1694,7 @@ qemuDomainDelTLSObjects(virQEMUDriverPtr driver, int -qemuDomainAddTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainAddTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virJSONValuePtr *secProps, virJSONValuePtr *tlsProps) @@ -1720,7 +1706,7 @@ qemuDomainAddTLSObjects(virQEMUDriverPtr driver, if (!tlsProps && !secProps) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (secProps && *secProps && @@ -1731,13 +1717,13 @@ qemuDomainAddTLSObjects(virQEMUDriverPtr driver, qemuMonitorAddObject(priv->mon, tlsProps, NULL) < 0) goto error; - return qemuDomainObjExitMonitor(driver, vm); + return qemuDomainObjExitMonitor(vm); error: virErrorPreserveLast(&orig_err); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, NULL); + qemuDomainDelTLSObjects(vm, asyncJob, secAlias, NULL); return -1; } @@ -1816,7 +1802,7 @@ qemuDomainAddChardevTLSObjects(virQEMUDriverPtr driver, goto cleanup; dev->data.tcp.tlscreds = true; - if (qemuDomainAddTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + if (qemuDomainAddTLSObjects(vm, QEMU_ASYNC_JOB_NONE, &secProps, &tlsProps) < 0) goto cleanup; @@ -1857,13 +1843,13 @@ qemuDomainDelChardevTLSObjects(virQEMUDriverPtr driver, !(secAlias = qemuAliasForSecret(inAlias, NULL))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorDelObject(priv->mon, tlsAlias, false)); if (secAlias) ignore_value(qemuMonitorDelObject(priv->mon, secAlias, false)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return 0; @@ -1906,7 +1892,7 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, &tlsAlias, &secAlias) < 0) goto audit; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorAttachCharDev(priv->mon, charAlias, @@ -1917,7 +1903,7 @@ int 
qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, if (qemuMonitorAddDevice(priv->mon, devstr) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto audit; def->redirdevs[def->nredirdevs++] = redirdev; @@ -1934,9 +1920,9 @@ int qemuDomainAttachRedirdevDevice(virQEMUDriverPtr driver, /* detach associated chardev on error */ if (chardevAdded) ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2169,7 +2155,7 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, &tlsAlias, &secAlias) < 0) goto audit; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorAttachCharDev(priv->mon, charAlias, chr->source) < 0) goto exit_monitor; @@ -2186,7 +2172,7 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto audit; qemuDomainChrInsertPreAlloced(vmdef, chr); @@ -2213,10 +2199,10 @@ int qemuDomainAttachChrDevice(virQEMUDriverPtr driver, /* detach associated chardev on error */ if (chardevAttached) qemuMonitorDetachCharDev(priv->mon, charAlias); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2278,7 +2264,7 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, goto audit; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (rng->backend == VIR_DOMAIN_RNG_BACKEND_EGD && qemuMonitorAttachCharDev(priv->mon, charAlias, @@ -2297,7 +2283,7 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -2327,11 +2313,11 @@ qemuDomainAttachRNGDevice(virQEMUDriverPtr driver, ignore_value(qemuMonitorDelObject(priv->mon, objAlias, false)); if (rng->backend == VIR_DOMAIN_RNG_BACKEND_EGD && chardevAdded) ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) releaseaddr = false; virErrorRestore(&orig_err); - qemuDomainDelTLSObjects(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuDomainDelTLSObjects(vm, QEMU_ASYNC_JOB_NONE, secAlias, tlsAlias); goto audit; } @@ -2412,7 +2398,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, if (qemuDomainAdjustMaxMemLock(vm, false) < 0) goto removedef; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorAddObject(priv->mon, &props, NULL) < 0) goto exit_monitor; objAdded = true; @@ -2420,7 +2406,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, if (qemuMonitorAddDevice(priv->mon, devstr) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { /* we shouldn't touch mem now, as the def might be freed */ mem = NULL; goto audit; @@ -2430,14 +2416,13 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, virObjectEventStateQueue(driver->domainEventState, event); /* fix the balloon size */ - 
ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE)); /* mem is consumed by vm->def */ mem = NULL; /* this step is best effort, removing the device would be so much trouble */ - ignore_value(qemuDomainUpdateMemoryDeviceInfo(driver, vm, - QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuDomainUpdateMemoryDeviceInfo(vm, QEMU_ASYNC_JOB_NONE)); ret = 0; @@ -2462,7 +2447,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); if (objAdded) ignore_value(qemuMonitorDelObject(priv->mon, objalias, false)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) mem = NULL; if (objAdded && mem) @@ -2528,9 +2513,9 @@ qemuDomainAttachHostUSBDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs+1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorAddDevice(priv->mon, devstr); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -1; goto cleanup; } @@ -2583,7 +2568,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, * exist; there must not be any missing index in between. */ for (i = 0; i <= hostdev->info->addr.drive.controller; i++) { - if (!qemuDomainFindOrCreateSCSIDiskController(driver, vm, i)) + if (!qemuDomainFindOrCreateSCSIDiskController(vm, i)) return -1; } @@ -2618,7 +2603,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs + 1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuBlockStorageSourceAttachApply(priv->mon, data) < 0) goto exit_monitor; @@ -2626,7 +2611,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, if (qemuMonitorAddDevice(priv->mon, devstr) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; virDomainAuditHostdev(vm, hostdev, "attach", true); @@ -2653,7 +2638,7 @@ qemuDomainAttachHostSCSIDevice(virQEMUDriverPtr driver, exit_monitor: virErrorPreserveLast(&orig_err); qemuBlockStorageSourceAttachRollback(priv->mon, data); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); virErrorRestore(&orig_err); virDomainAuditHostdev(vm, hostdev, "attach", false); @@ -2730,7 +2715,7 @@ qemuDomainAttachSCSIVHostDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->hostdevs, vm->def->nhostdevs + 1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if ((ret = qemuDomainAttachExtensionDevice(priv->mon, hostdev->info)) < 0) goto exit_monitor; @@ -2742,7 +2727,7 @@ qemuDomainAttachSCSIVHostDevice(virQEMUDriverPtr driver, } exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0 || ret < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || ret < 0) goto audit; vm->def->hostdevs[vm->def->nhostdevs++] = hostdev; @@ -2837,9 +2822,9 @@ qemuDomainAttachMediatedDevice(virQEMUDriverPtr driver, goto cleanup; teardownmemlock = true; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorAddDevice(priv->mon, devstr); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { ret = -1; goto cleanup; } @@ -2978,7 +2963,7 @@ qemuDomainAttachShmemDevice(virQEMUDriverPtr driver, } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if 
(shmem->server.enabled) { if (qemuMonitorAttachCharDev(priv->mon, charAlias, @@ -2999,7 +2984,7 @@ qemuDomainAttachShmemDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { release_address = false; goto cleanup; } @@ -3031,7 +3016,7 @@ qemuDomainAttachShmemDevice(virQEMUDriverPtr driver, ignore_value(qemuMonitorDelObject(priv->mon, memAlias, false)); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) release_address = false; virErrorRestore(&orig_err); @@ -3085,14 +3070,14 @@ qemuDomainAttachWatchdog(virQEMUDriverPtr driver, actionStr = virDomainWatchdogActionTypeToString(actualAction); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rv = qemuMonitorSetWatchdogAction(priv->mon, actionStr); if (rv >= 0) rv = qemuMonitorAddDevice(priv->mon, watchdogstr); - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseAddress = false; goto cleanup; } @@ -3112,8 +3097,7 @@ qemuDomainAttachWatchdog(virQEMUDriverPtr driver, int -qemuDomainAttachInputDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainAttachInputDevice(virDomainObjPtr vm, virDomainInputDefPtr input) { int ret = -1; @@ -3165,7 +3149,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, if (VIR_REALLOC_N(vm->def->inputs, vm->def->ninputs + 1) < 0) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuDomainAttachExtensionDevice(priv->mon, &input->info) < 0) goto exit_monitor; @@ -3175,7 +3159,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -3204,7 +3188,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, return ret; exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -3213,8 +3197,7 @@ qemuDomainAttachInputDevice(virQEMUDriverPtr driver, int -qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainAttachVsockDevice(virDomainObjPtr vm, virDomainVsockDefPtr vsock) { qemuDomainVsockPrivatePtr vsockPriv = (qemuDomainVsockPrivatePtr)vsock->privateData; @@ -3248,7 +3231,7 @@ qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, if (!(devstr = qemuBuildVsockDevStr(vm->def, vsock, priv->qemuCaps, fdprefix))) goto cleanup; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuDomainAttachExtensionDevice(priv->mon, &vsock->info) < 0) goto exit_monitor; @@ -3258,7 +3241,7 @@ qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, goto exit_monitor; } - if (qemuDomainObjExitMonitor(driver, vm) < 0) { + if (qemuDomainObjExitMonitor(vm) < 0) { releaseaddr = false; goto cleanup; } @@ -3278,7 +3261,7 @@ qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, return ret; exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) releaseaddr = false; goto cleanup; } @@ -3421,8 +3404,7 @@ qemuDomainChangeNetFilter(virDomainObjPtr vm, return 0; } -int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeNetLinkState(virDomainObjPtr vm, virDomainNetDefPtr dev, int linkstate) { @@ -3437,7 +3419,7 @@ int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, VIR_DEBUG("dev: %s, state: %d", dev->info.alias, 
linkstate); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSetLink(priv->mon, dev->info.alias, linkstate); if (ret < 0) @@ -3447,15 +3429,14 @@ int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, dev->linkstate = linkstate; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; return ret; } int -qemuDomainChangeNet(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeNet(virDomainObjPtr vm, virDomainDeviceDefPtr dev) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -3879,7 +3860,7 @@ qemuDomainChangeNet(virQEMUDriverPtr driver, } if (needLinkStateChange && - qemuDomainChangeNetLinkState(driver, vm, olddev, newdev->linkstate) < 0) { + qemuDomainChangeNetLinkState(vm, olddev, newdev->linkstate) < 0) { goto cleanup; } @@ -3970,8 +3951,7 @@ qemuDomainFindGraphicsIndex(virDomainDefPtr def, int -qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainChangeGraphicsPasswords(virDomainObjPtr vm, int type, virDomainGraphicsAuthDefPtr auth, const char *defaultPasswd, @@ -3993,7 +3973,7 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, if (auth->connected) connected = virDomainGraphicsAuthConnectedTypeToString(auth->connected); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return ret; ret = qemuMonitorSetPassword(priv->mon, type, password, connected); @@ -4013,7 +3993,7 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, ret = qemuMonitorExpirePassword(priv->mon, type, expire); end_job: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -4117,8 +4097,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver, dev->data.vnc.auth.passwd)) { VIR_DEBUG("Updating password on VNC server %p %p", dev->data.vnc.auth.passwd, cfg->vncPassword); - if (qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_VNC, + if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC, &dev->data.vnc.auth, cfg->vncPassword, QEMU_ASYNC_JOB_NONE) < 0) @@ -4165,8 +4144,7 @@ qemuDomainChangeGraphics(virQEMUDriverPtr driver, dev->data.spice.auth.passwd)) { VIR_DEBUG("Updating password on SPICE server %p %p", dev->data.spice.auth.passwd, cfg->spicePassword); - if (qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_SPICE, + if (qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE, &dev->data.spice.auth, cfg->spicePassword, QEMU_ASYNC_JOB_NONE) < 0) @@ -4280,7 +4258,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, } } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (corAlias) ignore_value(qemuMonitorBlockdevDel(priv->mon, corAlias)); @@ -4288,7 +4266,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, if (diskBackend) qemuBlockStorageSourceChainDetach(priv->mon, diskBackend); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; virDomainAuditDisk(vm, disk->src, NULL, "detach", true); @@ -4304,7 +4282,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, ignore_value(qemuRemoveSharedDevice(driver, &dev, vm->def->name)); if (virStorageSourceChainHasManagedPR(disk->src) && - qemuHotplugRemoveManagedPR(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + qemuHotplugRemoveManagedPR(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; ret = 0; @@ -4354,9 +4332,9 @@ 
qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver, backendAlias = g_strdup_printf("mem%s", mem->info.alias); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDelObject(priv->mon, backendAlias, true); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rc = -1; virDomainAuditMemory(vm, oldmem, newmem, "update", rc == 0); @@ -4381,7 +4359,7 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver, virDomainMemoryDefFree(mem); /* fix the balloon size */ - ignore_value(qemuProcessRefreshBalloonState(driver, vm, QEMU_ASYNC_JOB_NONE)); + ignore_value(qemuProcessRefreshBalloonState(vm, QEMU_ASYNC_JOB_NONE)); /* decrease the mlock limit after memory unplug if necessary */ ignore_value(qemuDomainAdjustMaxMemLock(vm, false)); @@ -4452,9 +4430,9 @@ qemuDomainRemoveHostDevice(virQEMUDriverPtr driver, detachscsi = qemuBuildHostdevSCSIDetachPrepare(hostdev, priv->qemuCaps); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); qemuBlockStorageSourceAttachRollback(priv->mon, detachscsi); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } @@ -4565,9 +4543,9 @@ qemuDomainRemoveNetDevice(virQEMUDriverPtr driver, */ ignore_value(qemuInterfaceStopDevice(net)); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorRemoveNetdev(priv->mon, hostnet_name) < 0) { - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; virDomainAuditNet(vm, net, NULL, "detach", false); return -1; @@ -4583,7 +4561,7 @@ qemuDomainRemoveNetDevice(virQEMUDriverPtr driver, } } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp) @@ -4651,9 +4629,9 @@ qemuDomainRemoveChrDevice(virQEMUDriverPtr driver, return -1; if (monitor) { - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorDetachCharDev(priv->mon, charAlias); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } @@ -4710,7 +4688,7 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, if (!(charAlias = qemuAliasChardevFromDevAlias(rng->info.alias))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (qemuMonitorDelObject(priv->mon, objAlias, true) < 0) rc = -1; @@ -4720,7 +4698,7 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, qemuMonitorDetachCharDev(priv->mon, charAlias) < 0) rc = -1; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rng->backend == VIR_DOMAIN_RNG_BACKEND_EGD && @@ -4749,8 +4727,7 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, static int -qemuDomainRemoveShmemDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRemoveShmemDevice(virDomainObjPtr vm, virDomainShmemDefPtr shmem) { int rc; @@ -4768,14 +4745,14 @@ qemuDomainRemoveShmemDevice(virQEMUDriverPtr driver, memAlias = g_strdup_printf("shmmem-%s", shmem->info.alias); } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (shmem->server.enabled) rc = qemuMonitorDetachCharDev(priv->mon, charAlias); else rc = qemuMonitorDelObject(priv->mon, memAlias, true); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; virDomainAuditShmem(vm, shmem, "detach", rc == 0); @@ -4864,13 +4841,13 @@ qemuDomainRemoveRedirdevDevice(virQEMUDriverPtr driver, 
if (!(charAlias = qemuAliasChardevFromDevAlias(dev->info.alias))) return -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); /* DeviceDel from Detach may remove chardev, * so we cannot rely on return status to delete TLS chardevs. */ ignore_value(qemuMonitorDetachCharDev(priv->mon, charAlias)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (qemuDomainDelChardevTLSObjects(driver, vm, dev->source, charAlias) < 0) @@ -5009,7 +4986,7 @@ qemuDomainRemoveDevice(virQEMUDriverPtr driver, return -1; break; case VIR_DOMAIN_DEVICE_SHMEM: - if (qemuDomainRemoveShmemDevice(driver, vm, dev->data.shmem) < 0) + if (qemuDomainRemoveShmemDevice(vm, dev->data.shmem) < 0) return -1; break; case VIR_DOMAIN_DEVICE_INPUT: @@ -5553,9 +5530,9 @@ qemuDomainDetachDeviceChr(virQEMUDriverPtr driver, if (guestfwd) { int rc; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorRemoveNetdev(priv->mon, tmpChr->info.alias); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rc = -1; if (rc < 0) @@ -5914,8 +5891,7 @@ qemuDomainDetachDeviceLive(virDomainObjPtr vm, static int -qemuDomainRemoveVcpu(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRemoveVcpu(virDomainObjPtr vm, unsigned int vcpu) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5926,7 +5902,7 @@ qemuDomainRemoveVcpu(virQEMUDriverPtr driver, virErrorPtr save_error = NULL; size_t i; - if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0) return -1; /* validation requires us to set the expected state prior to calling it */ @@ -5960,8 +5936,7 @@ qemuDomainRemoveVcpu(virQEMUDriverPtr driver, void -qemuDomainRemoveVcpuAlias(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuDomainRemoveVcpuAlias(virDomainObjPtr vm, const char *alias) { virDomainVcpuDefPtr vcpu; @@ -5973,7 +5948,7 @@ qemuDomainRemoveVcpuAlias(virQEMUDriverPtr driver, vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu); if (STREQ_NULLABLE(alias, vcpupriv->alias)) { - qemuDomainRemoveVcpu(driver, vm, i); + qemuDomainRemoveVcpu(vm, i); return; } } @@ -6016,7 +5991,7 @@ qemuDomainHotplugDelVcpu(virQEMUDriverPtr driver, goto cleanup; } - if (qemuDomainRemoveVcpu(driver, vm, vcpu) < 0) + if (qemuDomainRemoveVcpu(vm, vcpu) < 0) goto cleanup; qemuDomainVcpuPersistOrder(vm->def); @@ -6055,7 +6030,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, goto cleanup; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); if (newhotplug) { rc = qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops); @@ -6064,7 +6039,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, rc = qemuMonitorSetCPU(qemuDomainGetMonitor(vm), vcpu, true); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; virDomainAuditVcpu(vm, oldvcpus, oldvcpus + nvcpus, "update", rc == 0); @@ -6076,7 +6051,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, if (newhotplug) vm->def->individualvcpus = true; - if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, QEMU_ASYNC_JOB_NONE, false) < 0) goto cleanup; /* validation requires us to set the expected state prior to calling it */ diff --git a/src/qemu/qemu_hotplug.h b/src/qemu/qemu_hotplug.h index 4a49e04a15..4d0748ea60 100644 --- a/src/qemu/qemu_hotplug.h +++ b/src/qemu/qemu_hotplug.h @@ -31,14 +31,12 @@ int 
qemuDomainChangeEjectableMedia(virQEMUDriverPtr driver, virStorageSourcePtr newsrc, bool force); -void qemuDomainDelTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +void qemuDomainDelTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, const char *secAlias, const char *tlsAlias); -int qemuDomainAddTLSObjects(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAddTLSObjects(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virJSONValuePtr *secProps, virJSONValuePtr *tlsProps); @@ -52,8 +50,7 @@ int qemuDomainGetTLSObjects(virQEMUCapsPtr qemuCaps, virJSONValuePtr *tlsProps, virJSONValuePtr *secProps); -int qemuDomainAttachControllerDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachControllerDevice(virDomainObjPtr vm, virDomainControllerDefPtr controller); int qemuDomainAttachDeviceDiskLive(virQEMUDriverPtr driver, virDomainObjPtr vm, @@ -81,26 +78,21 @@ int qemuDomainAttachMemory(virQEMUDriverPtr driver, int qemuDomainChangeGraphics(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainGraphicsDefPtr dev); -int qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeGraphicsPasswords(virDomainObjPtr vm, int type, virDomainGraphicsAuthDefPtr auth, const char *defaultPasswd, int asyncJob); -int qemuDomainChangeNet(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeNet(virDomainObjPtr vm, virDomainDeviceDefPtr dev); -int qemuDomainChangeNetLinkState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainChangeNetLinkState(virDomainObjPtr vm, virDomainNetDefPtr dev, int linkstate); -int qemuDomainAttachInputDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachInputDevice(virDomainObjPtr vm, virDomainInputDefPtr input); -int qemuDomainAttachVsockDevice(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuDomainAttachVsockDevice(virDomainObjPtr vm, virDomainVsockDefPtr vsock); int qemuDomainAttachLease(virQEMUDriverPtr driver, @@ -118,8 +110,7 @@ int qemuDomainDetachDeviceLive(virDomainObjPtr vm, virQEMUDriverPtr driver, bool async); -void qemuDomainRemoveVcpuAlias(virQEMUDriverPtr driver, - virDomainObjPtr vm, +void qemuDomainRemoveVcpuAlias(virDomainObjPtr vm, const char *alias); int @@ -157,6 +148,5 @@ int qemuHotplugAttachDBusVMState(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); -int qemuHotplugRemoveDBusVMState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuHotplugRemoveDBusVMState(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index c517774c9f..601c11221d 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -82,21 +82,18 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, ); static int -qemuMigrationJobStart(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStart(virDomainObjPtr vm, qemuDomainAsyncJob job, unsigned long apiFlags) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; static void -qemuMigrationJobSetPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobSetPhase(virDomainObjPtr vm, qemuMigrationJobPhase phase) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); static void -qemuMigrationJobStartPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStartPhase(virDomainObjPtr vm, qemuMigrationJobPhase phase) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); @@ -110,8 +107,7 @@ qemuMigrationJobIsActive(virDomainObjPtr vm, ATTRIBUTE_NONNULL(1); static void 
-qemuMigrationJobFinish(virQEMUDriverPtr driver, - virDomainObjPtr obj) +qemuMigrationJobFinish(virDomainObjPtr obj) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); static void @@ -424,8 +420,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, devicename = diskAlias; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) goto cleanup; if (port == 0) { @@ -441,7 +436,7 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, if (qemuMonitorNBDServerAdd(priv->mon, devicename, exportname, true, NULL) < 0) goto exit_monitor; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; } @@ -454,14 +449,13 @@ qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver, return ret; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } static int -qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstStopNBDServer(virDomainObjPtr vm, qemuMigrationCookiePtr mig) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -469,13 +463,12 @@ qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver, if (!mig->nbd) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) return -1; if (qemuMonitorNBDServerStop(priv->mon) < 0) VIR_WARN("Unable to stop NBD server"); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; virPortAllocatorRelease(priv->nbdPort); @@ -646,8 +639,7 @@ qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm, * -1 on error or when job failed and failNoJob is true. */ static int -qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDCopyCancelOne(virDomainObjPtr vm, virDomainDiskDefPtr disk, qemuBlockJobDataPtr job, bool failNoJob, @@ -669,12 +661,12 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, return 1; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rv = qemuMonitorBlockJobCancel(priv->mon, job->name); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; return 0; @@ -683,7 +675,6 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, /** * qemuMigrationSrcNBDCopyCancel: - * @driver: qemu driver * @vm: domain * @check: if true report an error when some of the mirrors fails * @@ -695,8 +686,7 @@ qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver, * Returns 0 on success, -1 otherwise. 
*/ static int -qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDCopyCancel(virDomainObjPtr vm, bool check, qemuDomainAsyncJob asyncJob, virConnectPtr dconn) @@ -723,7 +713,7 @@ qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver, continue; } - rv = qemuMigrationSrcNBDCopyCancelOne(driver, vm, disk, job, + rv = qemuMigrationSrcNBDCopyCancelOne(vm, disk, job, check, asyncJob); if (rv != 0) { if (rv < 0) { @@ -766,7 +756,7 @@ qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver, if (!diskPriv->migrSource) continue; - qemuBlockStorageSourceDetachOneBlockdev(driver, vm, asyncJob, + qemuBlockStorageSourceDetachOneBlockdev(vm, asyncJob, diskPriv->migrSource); virObjectUnref(diskPriv->migrSource); diskPriv->migrSource = NULL; @@ -818,8 +808,7 @@ qemuMigrationSrcNBDStorageCopyBlockdevPrepareSource(virDomainDiskDefPtr disk, static int -qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDStorageCopyBlockdev(virDomainObjPtr vm, virDomainDiskDefPtr disk, const char *jobname, const char *sourcename, @@ -847,8 +836,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, false))) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) return -1; mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data); @@ -861,7 +849,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, if (mon_ret != 0) qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || mon_ret < 0) return -1; diskPriv->migrSource = g_steal_pointer(©src); @@ -871,8 +859,7 @@ qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver, static int -qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDStorageCopyDriveMirror(virDomainObjPtr vm, const char *diskAlias, const char *host, int port, @@ -890,15 +877,14 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver, diskAlias); } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) return -1; mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm), diskAlias, nbd_dest, "raw", mirror_speed, 0, 0, mirror_shallow, true); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || mon_ret < 0) return -1; return 0; @@ -906,8 +892,7 @@ qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver, static int -qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcNBDStorageCopyOne(virDomainObjPtr vm, virDomainDiskDefPtr disk, const char *host, int port, @@ -946,15 +931,14 @@ qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver, if (flags & VIR_MIGRATE_TLS || virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) { - rc = qemuMigrationSrcNBDStorageCopyBlockdev(driver, vm, - disk, jobname, + rc = qemuMigrationSrcNBDStorageCopyBlockdev(vm, disk, jobname, sourcename, persistjob, host, port, mirror_speed, mirror_shallow, tlsAlias); } else { - rc = qemuMigrationSrcNBDStorageCopyDriveMirror(driver, vm, diskAlias, + rc = qemuMigrationSrcNBDStorageCopyDriveMirror(vm, diskAlias, host, port, mirror_speed, mirror_shallow); @@ -1037,7 +1021,7 @@ 
qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks)) continue; - if (qemuMigrationSrcNBDStorageCopyOne(driver, vm, disk, host, port, + if (qemuMigrationSrcNBDStorageCopyOne(vm, disk, host, port, mirror_speed, mirror_shallow, tlsAlias, flags) < 0) return -1; @@ -1070,7 +1054,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver, return -1; } - qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationSrcFetchMirrorStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->current); /* Okay, all disks are ready. Modify migrate_flags */ @@ -1492,8 +1476,7 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) int -qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationAnyFetchStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo, char **error) @@ -1502,12 +1485,12 @@ qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, qemuMonitorMigrationStats stats; int rv; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rv = qemuMonitorGetMigrationStats(priv->mon, &stats, error); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; jobInfo->stats.mig = stats; @@ -1546,8 +1529,7 @@ qemuMigrationJobName(virDomainObjPtr vm) static int -qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobCheckStatus(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -1559,7 +1541,7 @@ qemuMigrationJobCheckStatus(virQEMUDriverPtr driver, if (!events || jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { - if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0) + if (qemuMigrationAnyFetchStats(vm, asyncJob, jobInfo, &error) < 0) return -1; } @@ -1615,8 +1597,7 @@ enum qemuMigrationCompletedFlags { * -2 something else failed, we need to cancel migration. */ static int -qemuMigrationAnyCompleted(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationAnyCompleted(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virConnectPtr dconn, unsigned int flags) @@ -1626,7 +1607,7 @@ qemuMigrationAnyCompleted(virQEMUDriverPtr driver, qemuDomainJobInfoPtr jobInfo = jobPriv->current; int pauseReason; - if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0) + if (qemuMigrationJobCheckStatus(vm, asyncJob) < 0) goto error; /* This flag should only be set when run on src host */ @@ -1707,8 +1688,7 @@ qemuMigrationAnyCompleted(virQEMUDriverPtr driver, * QEMU reports failed migration. 
*/ static int -qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcWaitForCompletion(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virConnectPtr dconn, unsigned int flags) @@ -1721,7 +1701,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; - while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob, + while ((rv = qemuMigrationAnyCompleted(vm, asyncJob, dconn, flags)) != 1) { if (rv < 0) return rv; @@ -1743,7 +1723,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, } if (events) - ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL)); + ignore_value(qemuMigrationAnyFetchStats(vm, asyncJob, jobInfo, NULL)); qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobInfoUpdateDowntime(jobInfo); @@ -1760,8 +1740,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver, static int -qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstWaitForCompletion(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, bool postcopy) { @@ -1777,7 +1756,7 @@ qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver, if (postcopy) flags = QEMU_MIGRATION_COMPLETED_POSTCOPY; - while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob, + while ((rv = qemuMigrationAnyCompleted(vm, asyncJob, NULL, flags)) != 1) { if (rv < 0 || virDomainObjWait(vm) < 0) return -1; @@ -1788,8 +1767,7 @@ qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver, static int -qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcGraphicsRelocate(virDomainObjPtr vm, qemuMigrationCookiePtr cookie, const char *graphicsuri) { @@ -1871,14 +1849,13 @@ qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver, goto cleanup; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress, port, tlsPort, tlsSubject); jobPriv->spiceMigration = !ret; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; } @@ -1963,8 +1940,7 @@ qemuMigrationDstGetURI(const char *migrateFrom, int -qemuMigrationDstRun(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstRun(virDomainObjPtr vm, const char *uri, qemuDomainAsyncJob asyncJob) { @@ -1973,7 +1949,7 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, VIR_DEBUG("Setting up incoming migration with URI %s", uri); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rv = qemuMonitorSetDBusVMStateIdList(priv->mon, @@ -1984,7 +1960,7 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, rv = qemuMonitorMigrateIncoming(priv->mon, uri); exit_monitor: - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rv < 0) return -1; if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) { @@ -1992,7 +1968,7 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, return 0; } - if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0) + if (qemuMigrationDstWaitForCompletion(vm, asyncJob, false) < 0) return -1; return 0; @@ -2008,9 +1984,8 @@ qemuMigrationDstRun(virQEMUDriverPtr driver, static void qemuMigrationSrcCleanup(virDomainObjPtr vm, virConnectPtr conn, - void *opaque) + void *opaque G_GNUC_UNUSED) { - 
virQEMUDriverPtr driver = opaque; qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; @@ -2030,17 +2005,17 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, switch ((qemuMigrationJobPhase) priv->job.phase) { case QEMU_MIGRATION_PHASE_BEGIN3: /* just forget we were about to migrate */ - qemuDomainObjDiscardAsyncJob(driver, vm); + qemuDomainObjDiscardAsyncJob(vm); break; case QEMU_MIGRATION_PHASE_PERFORM3_DONE: VIR_WARN("Migration of domain %s finished but we don't know if the" " domain was successfully started on destination or not", vm->def->name); - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); /* clear the job and let higher levels decide what to do */ - qemuDomainObjDiscardAsyncJob(driver, vm); + qemuDomainObjDiscardAsyncJob(vm); break; case QEMU_MIGRATION_PHASE_PERFORM3: @@ -2091,7 +2066,7 @@ qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver, * change protection. */ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3); if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags)) return NULL; @@ -2233,12 +2208,12 @@ qemuMigrationSrcBegin(virConnectPtr conn, qemuDomainAsyncJob asyncJob; if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) { - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) goto cleanup; asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT; } else { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; asyncJob = QEMU_ASYNC_JOB_NONE; } @@ -2252,7 +2227,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, * We don't want to require them on the destination. */ if (!(flags & VIR_MIGRATE_OFFLINE) && - qemuProcessRefreshDisks(driver, vm, asyncJob) < 0) + qemuProcessRefreshDisks(vm, asyncJob) < 0) goto endjob; if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname, @@ -2281,9 +2256,9 @@ qemuMigrationSrcBegin(virConnectPtr conn, endjob: if (flags & VIR_MIGRATE_CHANGE_PROTECTION) - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); else - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); goto cleanup; } @@ -2308,7 +2283,7 @@ qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver, if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) return; - qemuDomainObjDiscardAsyncJob(driver, vm); + qemuDomainObjDiscardAsyncJob(vm); } static qemuProcessIncomingDefPtr @@ -2547,10 +2522,9 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0) goto cleanup; - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, - flags) < 0) + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_IN, flags) < 0) goto cleanup; - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PREPARE); /* Domain starts inactive, even if the domain XML had an id field. 
*/ vm->def->id = -1; @@ -2607,7 +2581,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, goto stopjob; } - if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_IN, migParams, mig->caps->automatic) < 0) goto stopjob; @@ -2624,7 +2598,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, goto stopjob; } - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_IN, migParams) < 0) goto stopjob; @@ -2661,7 +2635,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, } if (incoming->deferredURI && - qemuMigrationDstRun(driver, vm, incoming->deferredURI, + qemuMigrationDstRun(vm, incoming->deferredURI, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) goto stopjob; @@ -2731,7 +2705,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, return ret; stopjob: - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN, jobPriv->migParams, priv->job.apiFlags); if (stopProcess) { @@ -2743,7 +2717,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags); } - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); goto cleanup; } @@ -3011,8 +2985,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virCheckFlags(QEMU_MIGRATION_FLAGS, -1); - qemuMigrationJobSetPhase(driver, vm, - retcode == 0 + qemuMigrationJobSetPhase(vm, retcode == 0 ? QEMU_MIGRATION_PHASE_CONFIRM3 : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED); @@ -3036,7 +3009,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, */ if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY && - qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationAnyFetchStats(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobInfo, NULL) < 0) VIR_WARN("Could not refresh migration statistics"); @@ -3075,8 +3048,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); /* cancel any outstanding NBD jobs */ - qemuMigrationSrcNBDCopyCancel(driver, vm, false, - QEMU_ASYNC_JOB_MIGRATION_OUT, NULL); + qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_MIGRATION_OUT, NULL); virErrorRestore(&orig_err); @@ -3086,7 +3058,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, else qemuMigrationSrcRestoreDomainState(driver, vm); - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) @@ -3118,7 +3090,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, else phase = QEMU_MIGRATION_PHASE_CONFIRM3; - qemuMigrationJobStartPhase(driver, vm, phase); + qemuMigrationJobStartPhase(vm, phase); virCloseCallbacksUnset(driver->closeCallbacks, vm, qemuMigrationSrcCleanup); @@ -3126,7 +3098,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, cookiein, cookieinlen, flags, cancelled); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); if (!virDomainObjIsActive(vm)) { if (!cancelled && ret == 0 && flags & VIR_MIGRATE_UNDEFINE_SOURCE) { virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm); @@ -3412,20 +3384,19 @@ qemuMigrationSrcConnect(virQEMUDriverPtr driver, static int -qemuMigrationSrcContinue(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcContinue(virDomainObjPtr vm, qemuMonitorMigrationStatus status, 
qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorMigrateContinue(priv->mon, status); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -3444,18 +3415,18 @@ qemuMigrationSetDBusVMState(virQEMUDriverPtr driver, if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; rv = qemuMonitorSetDBusVMStateIdList(priv->mon, (const char **)priv->dbusVMStateIds); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rv = -1; return rv; } else { - if (qemuHotplugRemoveDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuHotplugRemoveDBusVMState(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; } @@ -3555,10 +3526,10 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (!mig) goto error; - if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0) + if (qemuMigrationSrcGraphicsRelocate(vm, mig, graphicsuri) < 0) VIR_WARN("unable to provide data for graphics client relocation"); - if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationParamsCheck(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, migParams, mig->caps->automatic) < 0) goto error; @@ -3586,7 +3557,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, migrate_speed * 1024 * 1024) < 0) goto error; - if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationParamsApply(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, migParams) < 0) goto error; @@ -3633,8 +3604,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, goto error; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; if (priv->job.abortJob) { @@ -3690,7 +3660,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, break; } - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto error; /* From this point onwards we *must* call cancel to abort the @@ -3714,8 +3684,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (flags & VIR_MIGRATE_POSTCOPY) waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY; - rc = qemuMigrationSrcWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT, + rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn, waitFlags); if (rc == -2) { goto error; @@ -3737,7 +3706,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, } if (mig->nbd && - qemuMigrationSrcNBDCopyCancel(driver, vm, true, + qemuMigrationSrcNBDCopyCancel(vm, true, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn) < 0) goto error; @@ -3747,15 +3716,13 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, * end of the migration. 
*/ if (jobPriv->current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { - if (qemuMigrationSrcContinue(driver, vm, - QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, + if (qemuMigrationSrcContinue(vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto error; waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER; - rc = qemuMigrationSrcWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT, + rc = qemuMigrationSrcWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn, waitFlags); if (rc == -2) { goto error; @@ -3807,15 +3774,14 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, if (virDomainObjIsActive(vm)) { if (cancel && jobPriv->current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && - qemuDomainObjEnterMonitorAsync(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { + qemuDomainObjEnterMonitorAsync(vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuMonitorMigrateCancel(priv->mon); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } /* cancel any outstanding NBD jobs */ if (mig && mig->nbd) - qemuMigrationSrcNBDCopyCancel(driver, vm, false, + qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_MIGRATION_OUT, dconn); @@ -3829,7 +3795,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver, goto cleanup; exit_monitor: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto error; } @@ -4064,7 +4030,7 @@ qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver, * until the migration is complete. */ VIR_DEBUG("Perform %p", sconn); - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM2); if (flags & VIR_MIGRATE_TUNNELLED) ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL, NULL, 0, NULL, NULL, @@ -4302,7 +4268,7 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver, * confirm migration completion. 
*/ VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri)); - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3); VIR_FREE(cookiein); cookiein = g_steal_pointer(&cookieout); cookieinlen = cookieoutlen; @@ -4327,8 +4293,7 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver, if (ret < 0) { virErrorPreserveLast(&orig_err); } else { - qemuMigrationJobSetPhase(driver, vm, - QEMU_MIGRATION_PHASE_PERFORM3_DONE); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE); } /* If Perform returns < 0, then we need to cancel the VM @@ -4668,8 +4633,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainJobPrivatePtr jobPriv = priv->job.privateData; - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - flags) < 0) + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) goto cleanup; if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0) @@ -4692,7 +4656,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, migParams, flags, dname, resource, &v3proto); } else { - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM2); ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen, cookieout, cookieoutlen, flags, resource, NULL, NULL, 0, NULL, @@ -4723,12 +4687,12 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, * here */ if (!v3proto && ret < 0) - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); qemuMigrationSrcRestoreDomainState(driver, vm); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); if (!virDomainObjIsActive(vm) && ret == 0) { if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) { virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm); @@ -4770,14 +4734,14 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, /* If we didn't start the job in the begin phase, start it now. 
*/ if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) { - if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, flags) < 0) return ret; } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) { return ret; } - qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3); + qemuMigrationJobStartPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3); virCloseCallbacksUnset(driver->closeCallbacks, vm, qemuMigrationSrcCleanup); @@ -4791,7 +4755,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, goto endjob; } - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE); + qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE); if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn, qemuMigrationSrcCleanup) < 0) @@ -4799,9 +4763,9 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, endjob: if (ret < 0) { - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); } else { qemuMigrationJobContinue(vm); } @@ -5023,7 +4987,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, ignore_value(virTimeMillisNow(&timeReceived)); - qemuMigrationJobStartPhase(driver, vm, + qemuMigrationJobStartPhase(vm, v3proto ? QEMU_MIGRATION_PHASE_FINISH3 : QEMU_MIGRATION_PHASE_FINISH2); @@ -5052,7 +5016,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* Check for a possible error on the monitor in case Finish was called * earlier than monitor EOF handler got a chance to process the error */ - qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN); + qemuDomainCheckMonitor(vm, QEMU_ASYNC_JOB_MIGRATION_IN); goto endjob; } @@ -5069,7 +5033,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0) VIR_WARN("unable to provide network data for relocation"); - if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0) + if (qemuMigrationDstStopNBDServer(vm, mig) < 0) goto endjob; if (qemuRefreshVirtioChannelState(driver, vm, @@ -5101,8 +5065,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, /* We need to wait for QEMU to process all data sent by the source * before starting guest CPUs. */ - if (qemuMigrationDstWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN, !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) { /* There's not much we can do for v2 protocol since the * original domain on the source host is already gone. 
@@ -5170,8 +5133,7 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, } if (inPostCopy) { - if (qemuMigrationDstWaitForCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN, + if (qemuMigrationDstWaitForCompletion(vm, QEMU_ASYNC_JOB_MIGRATION_IN, false) < 0) { goto endjob; } @@ -5252,10 +5214,10 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); } - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_IN, jobPriv->migParams, priv->job.apiFlags); - qemuMigrationJobFinish(driver, vm); + qemuMigrationJobFinish(vm); if (!virDomainObjIsActive(vm)) qemuDomainRemoveInactiveJob(driver, vm); @@ -5308,16 +5270,16 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0) return -1; - if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0) + if (qemuMigrationParamsApply(vm, asyncJob, migParams) < 0) return -1; priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX; } else { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuMonitorSetMigrationSpeed(priv->mon, QEMU_DOMAIN_MIG_BANDWIDTH_MAX); priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; } } @@ -5340,7 +5302,7 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, compressor ? pipeFD[1] : fd) < 0) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; if (!compressor) { @@ -5355,11 +5317,11 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, if (virSetCloseExec(pipeFD[1]) < 0) { virReportSystemError(errno, "%s", _("Unable to set cloexec flag")); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } if (virCommandRunAsync(compressor, NULL) < 0) { - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); goto cleanup; } rc = qemuMonitorMigrateToFd(priv->mon, @@ -5369,21 +5331,21 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, VIR_CLOSE(pipeFD[1]) < 0) VIR_WARN("failed to close intermediate pipe"); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) goto cleanup; - rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0); + rc = qemuMigrationSrcWaitForCompletion(vm, asyncJob, NULL, 0); if (rc < 0) { if (rc == -2) { virErrorPreserveLast(&orig_err); virCommandAbort(compressor); if (virDomainObjIsActive(vm) && - qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { qemuMonitorMigrateCancel(priv->mon); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } goto cleanup; @@ -5405,12 +5367,12 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, if (qemuMigrationParamsSetULL(migParams, QEMU_MIGRATION_PARAM_MAX_BANDWIDTH, saveMigBandwidth * 1024 * 1024) == 0) - ignore_value(qemuMigrationParamsApply(driver, vm, asyncJob, + ignore_value(qemuMigrationParamsApply(vm, asyncJob, migParams)); } else { - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { 
qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth); - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); } } priv->migMaxBandwidth = saveMigBandwidth; @@ -5430,8 +5392,7 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm, int -qemuMigrationSrcCancel(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuMigrationSrcCancel(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; bool storage = false; @@ -5440,9 +5401,9 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver, VIR_DEBUG("Canceling unfinished outgoing migration of domain %s", vm->def->name); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorMigrateCancel(priv->mon)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; for (i = 0; i < vm->def->ndisks; i++) { @@ -5463,7 +5424,7 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver, } if (storage && - qemuMigrationSrcNBDCopyCancel(driver, vm, false, + qemuMigrationSrcNBDCopyCancel(vm, false, QEMU_ASYNC_JOB_NONE, NULL) < 0) return -1; @@ -5472,8 +5433,7 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver, static int -qemuMigrationJobStart(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStart(virDomainObjPtr vm, qemuDomainAsyncJob job, unsigned long apiFlags) { @@ -5492,7 +5452,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, JOB_MASK(QEMU_JOB_MIGRATION_OP); } - if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) + if (qemuDomainObjBeginAsyncJob(vm, job, op, apiFlags) < 0) return -1; jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; @@ -5502,8 +5462,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, } static void -qemuMigrationJobSetPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobSetPhase(virDomainObjPtr vm, qemuMigrationJobPhase phase) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -5515,15 +5474,14 @@ qemuMigrationJobSetPhase(virQEMUDriverPtr driver, return; } - qemuDomainObjSetJobPhase(driver, vm, phase); + qemuDomainObjSetJobPhase(vm, phase); } static void -qemuMigrationJobStartPhase(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationJobStartPhase(virDomainObjPtr vm, qemuMigrationJobPhase phase) { - qemuMigrationJobSetPhase(driver, vm, phase); + qemuMigrationJobSetPhase(vm, phase); } static void @@ -5553,9 +5511,9 @@ qemuMigrationJobIsActive(virDomainObjPtr vm, } static void -qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm) +qemuMigrationJobFinish(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); } @@ -5612,8 +5570,7 @@ qemuMigrationDstErrorReport(virQEMUDriverPtr driver, int -qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo) { @@ -5634,12 +5591,12 @@ qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver, if (!nbd) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon, false); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo) + if (qemuDomainObjExitMonitor(vm) < 0 || !blockinfo) return -1; memset(stats, 0, sizeof(*stats)); diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index b6f88d3fd9..e99351ef82 100644 --- a/src/qemu/qemu_migration.h +++ 
b/src/qemu/qemu_migration.h @@ -210,12 +210,10 @@ qemuMigrationSrcToFile(virQEMUDriverPtr driver, ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT; int -qemuMigrationSrcCancel(virQEMUDriverPtr driver, - virDomainObjPtr vm); +qemuMigrationSrcCancel(virDomainObjPtr vm); int -qemuMigrationAnyFetchStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationAnyFetchStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo, char **error); @@ -241,8 +239,7 @@ qemuMigrationDstGetURI(const char *migrateFrom, int migrateFd); int -qemuMigrationDstRun(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationDstRun(virDomainObjPtr vm, const char *uri, qemuDomainAsyncJob asyncJob); @@ -251,7 +248,6 @@ qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver, virDomainObjPtr vm); int -qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationSrcFetchMirrorStats(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, qemuDomainJobInfoPtr jobInfo); diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index a0e8cba8ba..949f2a2d97 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -450,7 +450,6 @@ qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig, static int qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig, - virQEMUDriverPtr driver, virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -473,13 +472,13 @@ qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig, mig->nbd->disks = g_new0(struct qemuMigrationCookieNBDDisk, vm->def->ndisks); mig->nbd->ndisks = 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, priv->job.asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, priv->job.asyncJob) < 0) return -1; if (blockdev) rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats); else rc = qemuMonitorBlockStatsUpdateCapacity(priv->mon, stats, false); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; for (i = 0; i < vm->def->ndisks; i++) { @@ -1421,7 +1420,7 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, } if ((flags & QEMU_MIGRATION_COOKIE_NBD) && - qemuMigrationCookieAddNBD(mig, driver, dom) < 0) + qemuMigrationCookieAddNBD(mig, dom) < 0) return -1; if (flags & QEMU_MIGRATION_COOKIE_STATS && diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c index 04434e9557..9252ce50d5 100644 --- a/src/qemu/qemu_migration_params.c +++ b/src/qemu/qemu_migration_params.c @@ -819,7 +819,6 @@ qemuMigrationCapsToJSON(virBitmapPtr caps, /** * qemuMigrationParamsApply - * @driver: qemu driver * @vm: domain object * @asyncJob: migration job * @migParams: migration parameters to send to QEMU @@ -829,8 +828,7 @@ qemuMigrationCapsToJSON(virBitmapPtr caps, * Returns 0 on success, -1 on failure. 
*/ int -qemuMigrationParamsApply(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsApply(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams) { @@ -842,7 +840,7 @@ qemuMigrationParamsApply(virQEMUDriverPtr driver, int ret = -1; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (asyncJob == QEMU_ASYNC_JOB_NONE) { @@ -890,7 +888,7 @@ qemuMigrationParamsApply(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (xbzrleCacheSize_old) @@ -991,9 +989,9 @@ qemuMigrationParamsEnableTLS(virQEMUDriverPtr driver, * This should prevent any issues just in case some cleanup wasn't * properly completed (both src and dst use the same alias) or * some other error path between now and perform . */ - qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, *tlsAlias); + qemuDomainDelTLSObjects(vm, asyncJob, secAlias, *tlsAlias); - if (qemuDomainAddTLSObjects(driver, vm, asyncJob, &secProps, &tlsProps) < 0) + if (qemuDomainAddTLSObjects(vm, asyncJob, &secProps, &tlsProps) < 0) return -1; if (qemuMigrationParamsSetString(migParams, @@ -1042,7 +1040,6 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, /* qemuMigrationParamsResetTLS - * @driver: pointer to qemu driver * @vm: domain object * @asyncJob: migration job to join * @apiFlags: API flags used to start the migration @@ -1051,8 +1048,7 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, * security objects and free the secinfo */ static void -qemuMigrationParamsResetTLS(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsResetTLS(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr origParams, unsigned long apiFlags) @@ -1069,14 +1065,13 @@ qemuMigrationParamsResetTLS(virQEMUDriverPtr driver, tlsAlias = qemuAliasTLSObjFromSrcAlias(QEMU_MIGRATION_TLS_ALIAS_BASE); secAlias = qemuAliasForSecret(QEMU_MIGRATION_TLS_ALIAS_BASE, NULL); - qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, tlsAlias); + qemuDomainDelTLSObjects(vm, asyncJob, secAlias, tlsAlias); g_clear_pointer(&QEMU_DOMAIN_PRIVATE(vm)->migSecinfo, qemuDomainSecretInfoFree); } int -qemuMigrationParamsFetch(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsFetch(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr *migParams) { @@ -1086,12 +1081,12 @@ qemuMigrationParamsFetch(virQEMUDriverPtr driver, *migParams = NULL; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetMigrationParams(priv->mon, &jsonParams); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; if (!(*migParams = qemuMigrationParamsFromJSON(jsonParams))) @@ -1145,8 +1140,7 @@ qemuMigrationParamsGetULL(qemuMigrationParamsPtr migParams, * are unsupported by QEMU. */ int -qemuMigrationParamsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsCheck(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams, virBitmapPtr remoteCaps) @@ -1206,7 +1200,7 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver, * to ask QEMU for their current settings. 
*/ - return qemuMigrationParamsFetch(driver, vm, asyncJob, &jobPriv->migParams); + return qemuMigrationParamsFetch(vm, asyncJob, &jobPriv->migParams); } @@ -1217,8 +1211,7 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver, * migration (save, managedsave, snapshots, dump) will not try to use them. */ void -qemuMigrationParamsReset(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsReset(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr origParams, unsigned long apiFlags) @@ -1233,10 +1226,10 @@ qemuMigrationParamsReset(virQEMUDriverPtr driver, if (!virDomainObjIsActive(vm) || !origParams) goto cleanup; - if (qemuMigrationParamsApply(driver, vm, asyncJob, origParams) < 0) + if (qemuMigrationParamsApply(vm, asyncJob, origParams) < 0) goto cleanup; - qemuMigrationParamsResetTLS(driver, vm, asyncJob, origParams, apiFlags); + qemuMigrationParamsResetTLS(vm, asyncJob, origParams, apiFlags); cleanup: virErrorRestore(&err); @@ -1384,8 +1377,7 @@ qemuMigrationParamsParse(xmlXPathContextPtr ctxt, int -qemuMigrationCapsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationCapsCheck(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -1396,12 +1388,12 @@ qemuMigrationCapsCheck(virQEMUDriverPtr driver, int ret = -1; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetMigrationCapabilities(priv->mon, &caps); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) goto cleanup; if (!caps) { @@ -1434,13 +1426,13 @@ qemuMigrationCapsCheck(virQEMUDriverPtr driver, if (!(json = qemuMigrationCapsToJSON(migEvent, migEvent))) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorSetMigrationCapabilities(priv->mon, json); json = NULL; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) { diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h index 9aea24725f..231f4db90b 100644 --- a/src/qemu/qemu_migration_params.h +++ b/src/qemu/qemu_migration_params.h @@ -95,8 +95,7 @@ qemuMigrationParamsFree(qemuMigrationParamsPtr migParams); G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuMigrationParams, qemuMigrationParamsFree); int -qemuMigrationParamsApply(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsApply(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams); @@ -114,8 +113,7 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm, qemuMigrationParamsPtr migParams); int -qemuMigrationParamsFetch(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsFetch(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr *migParams); @@ -130,15 +128,13 @@ qemuMigrationParamsGetULL(qemuMigrationParamsPtr migParams, unsigned long long *value); int -qemuMigrationParamsCheck(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsCheck(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr migParams, virBitmapPtr remoteCaps); void -qemuMigrationParamsReset(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuMigrationParamsReset(virDomainObjPtr vm, int asyncJob, qemuMigrationParamsPtr origParams, unsigned long apiFlags); @@ -152,8 +148,7 @@ qemuMigrationParamsParse(xmlXPathContextPtr ctxt, qemuMigrationParamsPtr *migParams); int -qemuMigrationCapsCheck(virQEMUDriverPtr 
driver, - virDomainObjPtr vm, +qemuMigrationCapsCheck(virDomainObjPtr vm, int asyncJob); bool diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 1c7c0ba19a..7e4f5e2cfc 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -422,7 +422,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, if (vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY || vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE) { - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -436,7 +436,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainAuditStop(vm, "destroyed"); qemuDomainRemoveInactive(driver, vm); endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); } ret = 0; @@ -467,7 +467,7 @@ qemuProcessFakeReboot(void *opaque) VIR_DEBUG("vm=%p", vm); virObjectLock(vm); - if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -476,10 +476,10 @@ qemuProcessFakeReboot(void *opaque) goto endjob; } - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); rc = qemuMonitorSystemReset(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto endjob; if (rc < 0) @@ -505,7 +505,7 @@ qemuProcessFakeReboot(void *opaque) ret = 0; endjob: - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: priv->pausedShutdown = false; @@ -1947,18 +1947,17 @@ qemuProcessMonitorLogFree(void *opaque) static int -qemuProcessInitMonitor(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessInitMonitor(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { int ret; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; ret = qemuMonitorSetCapabilities(QEMU_DOMAIN_PRIVATE(vm)->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; @@ -2017,10 +2016,10 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob, return -1; } - if (qemuProcessInitMonitor(driver, vm, asyncJob) < 0) + if (qemuProcessInitMonitor(vm, asyncJob) < 0) return -1; - if (qemuMigrationCapsCheck(driver, vm, asyncJob) < 0) + if (qemuMigrationCapsCheck(vm, asyncJob) < 0) return -1; return 0; @@ -2255,11 +2254,11 @@ qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, virHashTablePtr info = NULL; int ret = -1; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ret = qemuMonitorGetChardevInfo(priv->mon, &info); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -2300,8 +2299,7 @@ qemuProcessRefreshPRManagerState(virDomainObjPtr vm, static int -qemuRefreshPRManagerState(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuRefreshPRManagerState(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; virHashTablePtr info = NULL; @@ -2311,9 +2309,9 @@ qemuRefreshPRManagerState(virQEMUDriverPtr driver, !qemuDomainDefHasManagedPR(vm)) return 0; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetPRManagerInfo(priv->mon, &info); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -2328,8 +2326,7 
@@ qemuRefreshPRManagerState(virQEMUDriverPtr driver, static void -qemuRefreshRTC(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuRefreshRTC(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; time_t now, then; @@ -2341,10 +2338,10 @@ qemuRefreshRTC(virQEMUDriverPtr driver, return; memset(&thenbits, 0, sizeof(thenbits)); - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); now = time(NULL); rv = qemuMonitorGetRTCTime(priv->mon, &thenbits); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) rv = -1; if (rv < 0) @@ -2365,8 +2362,7 @@ qemuRefreshRTC(virQEMUDriverPtr driver, } int -qemuProcessRefreshBalloonState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessRefreshBalloonState(virDomainObjPtr vm, int asyncJob) { unsigned long long balloon; @@ -2379,11 +2375,11 @@ qemuProcessRefreshBalloonState(virQEMUDriverPtr driver, return 0; } - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetBalloonInfo(qemuDomainGetMonitor(vm), &balloon); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; vm->def->mem.cur_balloon = balloon; @@ -2417,11 +2413,11 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver, * reliable if it's available. * Note that the monitor itself can be on a pty, so we still need to try the * log output method. */ - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ret = qemuMonitorGetChardevInfo(priv->mon, &info); VIR_DEBUG("qemuMonitorGetChardevInfo returned %i", ret); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret == 0) { @@ -2445,8 +2441,7 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver, static int -qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessDetectIOThreadPIDs(virDomainObjPtr vm, int asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -2461,10 +2456,10 @@ qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver, } /* Get the list of IOThreads from qemu */ - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (niothreads < 0) goto cleanup; @@ -2589,8 +2584,7 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm G_GNUC_UNUSED) /* set link states to down on interfaces at qemu start */ static int -qemuProcessSetLinkStates(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetLinkStates(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -2599,7 +2593,7 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, int ret = -1; int rv; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; for (i = 0; i < def->nnets; i++) { @@ -2627,7 +2621,7 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; } @@ -2978,14 +2972,12 @@ qemuProcessInitPasswords(virQEMUDriverPtr driver, for (i = 0; i < vm->def->ngraphics; ++i) { virDomainGraphicsDefPtr graphics = 
vm->def->graphics[i]; if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) { - ret = qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_VNC, + ret = qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_VNC, &graphics->data.vnc.auth, cfg->vncPassword, asyncJob); } else if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) { - ret = qemuDomainChangeGraphicsPasswords(driver, vm, - VIR_DOMAIN_GRAPHICS_TYPE_SPICE, + ret = qemuDomainChangeGraphicsPasswords(vm, VIR_DOMAIN_GRAPHICS_TYPE_SPICE, &graphics->data.spice.auth, cfg->spicePassword, asyncJob); @@ -3057,7 +3049,7 @@ qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver, virDomainVideoDefPtr video = NULL; g_autoptr(virQEMUDriverConfig) cfg = NULL; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; for (i = 0; i < vm->def->nvideos; i++) { @@ -3109,7 +3101,7 @@ qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver, } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; cfg = virQEMUDriverGetConfig(driver); @@ -3118,7 +3110,7 @@ qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver, return ret; error: - ignore_value(qemuDomainObjExitMonitor(driver, vm)); + ignore_value(qemuDomainObjExitMonitor(vm)); return -1; } @@ -3237,11 +3229,11 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm, priv->runningReason = reason; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto release; ret = qemuMonitorStartCPUs(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -3276,11 +3268,11 @@ int qemuProcessStopCPUs(virQEMUDriverPtr driver, priv->pausedReason = reason; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; ret = qemuMonitorStopCPUs(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; if (ret < 0) @@ -3360,7 +3352,7 @@ qemuProcessFiltersInstantiate(virDomainDefPtr def) } static int -qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm) +qemuProcessUpdateState(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; virDomainState state; @@ -3372,9 +3364,9 @@ qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm) g_autofree char *msg = NULL; int ret; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ret = qemuMonitorGetStatus(priv->mon, &running, &reason); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (ret < 0) @@ -3479,7 +3471,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, break; } - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE, jobPriv->migParams, job->apiFlags); return 0; } @@ -3523,7 +3515,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } else { VIR_DEBUG("Cancelling unfinished migration of domain %s", vm->def->name); - if (qemuMigrationSrcCancel(driver, vm) < 0) { + if (qemuMigrationSrcCancel(vm) < 0) { VIR_WARN("Could not cancel ongoing migration of domain %s", vm->def->name); } @@ -3575,7 +3567,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } } - qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, + qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_NONE, jobPriv->migParams, 
job->apiFlags); return 0; } @@ -3612,9 +3604,9 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, case QEMU_ASYNC_JOB_SAVE: case QEMU_ASYNC_JOB_DUMP: case QEMU_ASYNC_JOB_SNAPSHOT: - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); ignore_value(qemuMonitorMigrateCancel(priv->mon)); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; /* resume the domain but only if it was paused as a result of * running a migration-to-file operation. Although we are @@ -3723,7 +3715,7 @@ qemuProcessUpdateDevices(virQEMUDriverPtr driver, old = priv->qemuDevices; priv->qemuDevices = NULL; - if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuDomainUpdateDeviceList(vm, QEMU_ASYNC_JOB_NONE) < 0) goto cleanup; qemuDevices = (const char **)priv->qemuDevices; @@ -4198,8 +4190,7 @@ qemuProcessTranslateCPUFeatures(const char *name, static int -qemuProcessFetchGuestCPU(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessFetchGuestCPU(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virCPUDataPtr *enabled, virCPUDataPtr *disabled) @@ -4218,7 +4209,7 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver, if (!generic && !ARCH_IS_X86(vm->def->os.arch)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (generic) { @@ -4230,7 +4221,7 @@ qemuProcessFetchGuestCPU(virQEMUDriverPtr driver, rc = qemuMonitorGetGuestCPUx86(priv->mon, &dataEnabled, &dataDisabled); } - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) return -1; if (rc == -1) @@ -4305,15 +4296,14 @@ qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm, static int -qemuProcessUpdateAndVerifyCPU(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessUpdateAndVerifyCPU(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { virCPUDataPtr cpu = NULL; virCPUDataPtr disabled = NULL; int ret = -1; - if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0) + if (qemuProcessFetchGuestCPU(vm, asyncJob, &cpu, &disabled) < 0) goto cleanup; if (qemuProcessVerifyCPU(vm, cpu) < 0) @@ -4332,8 +4322,7 @@ qemuProcessUpdateAndVerifyCPU(virQEMUDriverPtr driver, static int -qemuProcessFetchCPUDefinitions(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessFetchCPUDefinitions(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob, virDomainCapsCPUModelsPtr *cpuModels) { @@ -4341,12 +4330,12 @@ qemuProcessFetchCPUDefinitions(virQEMUDriverPtr driver, g_autoptr(virDomainCapsCPUModels) models = NULL; int rc; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = virQEMUCapsFetchCPUModels(priv->mon, vm->def->os.arch, &models); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; *cpuModels = g_steal_pointer(&models); @@ -4355,8 +4344,7 @@ qemuProcessFetchCPUDefinitions(virQEMUDriverPtr driver, static int -qemuProcessUpdateCPU(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessUpdateCPU(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { g_autoptr(virCPUData) cpu = NULL; @@ -4368,13 +4356,13 @@ qemuProcessUpdateCPU(virQEMUDriverPtr driver, */ vm->def->cpu->fallback = VIR_CPU_FALLBACK_ALLOW; - if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0) + if (qemuProcessFetchGuestCPU(vm, asyncJob, &cpu, &disabled) < 0) return -1; if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0) 
return -1; - if (qemuProcessFetchCPUDefinitions(driver, vm, asyncJob, &models) < 0 || + if (qemuProcessFetchCPUDefinitions(vm, asyncJob, &models) < 0 || virCPUTranslate(vm->def->os.arch, vm->def->cpu, models) < 0) return -1; @@ -4580,12 +4568,11 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps, * parameter between qemuProcessBeginJob and qemuProcessEndJob. */ int -qemuProcessBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessBeginJob(virDomainObjPtr vm, virDomainJobOperation operation, unsigned long apiFlags) { - if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START, + if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_START, operation, apiFlags) < 0) return -1; @@ -4595,10 +4582,9 @@ qemuProcessBeginJob(virQEMUDriverPtr driver, void -qemuProcessEndJob(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuProcessEndJob(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(driver, vm); + qemuDomainObjEndAsyncJob(vm); } @@ -5054,8 +5040,7 @@ qemuProcessSetupRawIO(virQEMUDriverPtr driver, static int -qemuProcessSetupBalloon(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetupBalloon(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { unsigned long long balloon = vm->def->mem.cur_balloon; @@ -5065,7 +5050,7 @@ qemuProcessSetupBalloon(virQEMUDriverPtr driver, if (!virDomainDefHasMemballoon(vm->def)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; if (vm->def->memballoon->period) @@ -5077,7 +5062,7 @@ qemuProcessSetupBalloon(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; } @@ -5920,8 +5905,7 @@ qemuProcessVcpusSortOrder(const void *a, static int -qemuProcessSetupHotpluggableVcpus(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetupHotpluggableVcpus(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def); @@ -5967,13 +5951,13 @@ qemuProcessSetupHotpluggableVcpus(virQEMUDriverPtr driver, if (!(vcpuprops = qemuBuildHotpluggableCPUProps(vcpu))) goto cleanup; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) goto cleanup; rc = qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops); vcpuprops = NULL; - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; if (rc < 0) @@ -6604,8 +6588,7 @@ qemuProcessGenID(virDomainObjPtr vm, * Same hack is done in qemuDomainAttachDiskGeneric. 
*/ static int -qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessSetupDiskThrottlingBlockdev(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -6617,7 +6600,7 @@ qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriverPtr driver, VIR_DEBUG("Setting up disk throttling for -blockdev via block_set_io_throttle"); - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; for (i = 0; i < vm->def->ndisks; i++) { @@ -6640,7 +6623,7 @@ qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriverPtr driver, ret = 0; cleanup: - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; return ret; } @@ -6930,23 +6913,23 @@ qemuProcessLaunch(virConnectPtr conn, goto cleanup; VIR_DEBUG("Verifying and updating provided guest CPU"); - if (qemuProcessUpdateAndVerifyCPU(driver, vm, asyncJob) < 0) + if (qemuProcessUpdateAndVerifyCPU(vm, asyncJob) < 0) goto cleanup; VIR_DEBUG("setting up hotpluggable cpus"); if (qemuDomainHasHotpluggableStartupVcpus(vm->def)) { - if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, asyncJob, false) < 0) goto cleanup; if (qemuProcessValidateHotpluggableVcpus(vm->def) < 0) goto cleanup; - if (qemuProcessSetupHotpluggableVcpus(driver, vm, asyncJob) < 0) + if (qemuProcessSetupHotpluggableVcpus(vm, asyncJob) < 0) goto cleanup; } VIR_DEBUG("Refreshing VCPU info"); - if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0) + if (qemuDomainRefreshVcpuInfo(vm, asyncJob, false) < 0) goto cleanup; if (qemuDomainValidateVcpuInfo(vm) < 0) @@ -6955,7 +6938,7 @@ qemuProcessLaunch(virConnectPtr conn, qemuDomainVcpuPersistOrder(vm->def); VIR_DEBUG("Detecting IOThread PIDs"); - if (qemuProcessDetectIOThreadPIDs(driver, vm, asyncJob) < 0) + if (qemuProcessDetectIOThreadPIDs(vm, asyncJob) < 0) goto cleanup; VIR_DEBUG("Setting global CPU cgroup (if required)"); @@ -6985,21 +6968,21 @@ qemuProcessLaunch(virConnectPtr conn, /* qemu doesn't support setting this on the command line, so * enter the monitor */ VIR_DEBUG("Setting network link states"); - if (qemuProcessSetLinkStates(driver, vm, asyncJob) < 0) + if (qemuProcessSetLinkStates(vm, asyncJob) < 0) goto cleanup; VIR_DEBUG("Setting initial memory amount"); - if (qemuProcessSetupBalloon(driver, vm, asyncJob) < 0) + if (qemuProcessSetupBalloon(vm, asyncJob) < 0) goto cleanup; - if (qemuProcessSetupDiskThrottlingBlockdev(driver, vm, asyncJob) < 0) + if (qemuProcessSetupDiskThrottlingBlockdev(vm, asyncJob) < 0) goto cleanup; /* Since CPUs were not started yet, the balloon could not return the memory * to the host and thus cur_balloon needs to be updated so that GetXMLdesc * and friends return the correct size in case they can't grab the job */ if (!incoming && !snapshot && - qemuProcessRefreshBalloonState(driver, vm, asyncJob) < 0) + qemuProcessRefreshBalloonState(vm, asyncJob) < 0) goto cleanup; if (flags & VIR_QEMU_PROCESS_START_AUTODESTROY && @@ -7032,11 +7015,11 @@ qemuProcessRefreshState(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; VIR_DEBUG("Fetching list of active devices"); - if (qemuDomainUpdateDeviceList(driver, vm, asyncJob) < 0) + if (qemuDomainUpdateDeviceList(vm, asyncJob) < 0) return -1; VIR_DEBUG("Updating info of memory devices"); - if (qemuDomainUpdateMemoryDeviceInfo(driver, vm, asyncJob) < 0) + if (qemuDomainUpdateMemoryDeviceInfo(vm, asyncJob) < 0) 
return -1; VIR_DEBUG("Detecting actual memory size for video device"); @@ -7044,10 +7027,10 @@ qemuProcessRefreshState(virQEMUDriverPtr driver, return -1; VIR_DEBUG("Updating disk data"); - if (qemuProcessRefreshDisks(driver, vm, asyncJob) < 0) + if (qemuProcessRefreshDisks(vm, asyncJob) < 0) return -1; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && - qemuBlockNodeNamesDetect(driver, vm, asyncJob) < 0) + qemuBlockNodeNamesDetect(vm, asyncJob) < 0) return -1; return 0; @@ -7166,7 +7149,7 @@ qemuProcessStart(virConnectPtr conn, if (incoming) { if (incoming->deferredURI && - qemuMigrationDstRun(driver, vm, incoming->deferredURI, asyncJob) < 0) + qemuMigrationDstRun(vm, incoming->deferredURI, asyncJob) < 0) goto stop; } else { /* Refresh state of devices from QEMU. During migration this happens @@ -7294,8 +7277,7 @@ qemuProcessKill(virDomainObjPtr vm, unsigned int flags) * qemuProcessStop. */ int -qemuProcessBeginStopJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessBeginStopJob(virDomainObjPtr vm, qemuDomainJob job, bool forceKill) { @@ -7315,7 +7297,7 @@ qemuProcessBeginStopJob(virQEMUDriverPtr driver, /* Wake up anything waiting on domain condition */ virDomainObjBroadcast(vm); - if (qemuDomainObjBeginJob(driver, vm, job) < 0) + if (qemuDomainObjBeginJob(vm, job) < 0) goto cleanup; ret = 0; @@ -7356,7 +7338,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); if (asyncJob != QEMU_ASYNC_JOB_NONE) { - if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0) + if (qemuDomainObjBeginNestedJob(vm, asyncJob) < 0) goto cleanup; } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE && priv->job.asyncOwner == virThreadSelfID() && @@ -7662,7 +7644,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, endjob: if (asyncJob != QEMU_ASYNC_JOB_NONE) - qemuDomainObjEndJob(driver, vm); + qemuDomainObjEndJob(vm); cleanup: virErrorRestore(&orig_err); @@ -7687,12 +7669,12 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, if (priv->job.asyncJob) { VIR_DEBUG("vm=%s has long-term job active, cancelling", dom->def->name); - qemuDomainObjDiscardAsyncJob(driver, dom); + qemuDomainObjDiscardAsyncJob(dom); } VIR_DEBUG("Killing domain"); - if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0) + if (qemuProcessBeginStopJob(dom, QEMU_JOB_DESTROY, true) < 0) return; qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED, @@ -7705,7 +7687,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, qemuDomainRemoveInactive(driver, dom); - qemuDomainObjEndJob(driver, dom); + qemuDomainObjEndJob(dom); virObjectEventStateQueue(driver->domainEventState, event); } @@ -7738,8 +7720,7 @@ bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver, int -qemuProcessRefreshDisks(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessRefreshDisks(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivatePtr priv = vm->privateData; @@ -7748,9 +7729,9 @@ qemuProcessRefreshDisks(virQEMUDriverPtr driver, int ret = -1; size_t i; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) { + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) { table = qemuMonitorGetBlockInfo(priv->mon); - if (qemuDomainObjExitMonitor(driver, vm) < 0) + if (qemuDomainObjExitMonitor(vm) < 0) goto cleanup; } @@ -7795,8 +7776,7 @@ qemuProcessRefreshDisks(virQEMUDriverPtr driver, static int -qemuProcessRefreshCPUMigratability(virQEMUDriverPtr driver, - virDomainObjPtr vm, +qemuProcessRefreshCPUMigratability(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob) { 
qemuDomainObjPrivatePtr priv = vm->privateData; @@ -7815,12 +7795,12 @@ qemuProcessRefreshCPUMigratability(virQEMUDriverPtr driver, if (!ARCH_IS_X86(def->os.arch)) return 0; - if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) + if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0) return -1; rc = qemuMonitorGetCPUMigratable(priv->mon, &migratable); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0) + if (qemuDomainObjExitMonitor(vm) < 0 || rc < 0) return -1; if (rc == 1) @@ -7855,7 +7835,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver, if (!vm->def->cpu) return 0; - if (qemuProcessRefreshCPUMigratability(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshCPUMigratability(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; if (!(host = virQEMUDriverGetHostCPU(driver))) { @@ -7890,7 +7870,7 @@ qemuProcessRefreshCPU(virQEMUDriverPtr driver, if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0) return -1; - if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessUpdateCPU(vm, QEMU_ASYNC_JOB_NONE) < 0) return -1; } else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) { /* We only try to fix CPUs when the libvirt/QEMU combo used to start @@ -7969,15 +7949,14 @@ qemuProcessRefreshLegacyBlockjob(void *payload, static int -qemuProcessRefreshLegacyBlockjobs(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuProcessRefreshLegacyBlockjobs(virDomainObjPtr vm) { virHashTablePtr blockJobs = NULL; int ret = -1; - qemuDomainObjEnterMonitor(driver, vm); + qemuDomainObjEnterMonitor(vm); blockJobs = qemuMonitorGetAllBlockJobInfo(qemuDomainGetMonitor(vm), true); - if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockJobs) + if (qemuDomainObjExitMonitor(vm) < 0 || !blockJobs) goto cleanup; if (virHashForEach(blockJobs, qemuProcessRefreshLegacyBlockjob, vm) < 0) @@ -7992,15 +7971,14 @@ qemuProcessRefreshLegacyBlockjobs(virQEMUDriverPtr driver, static int -qemuProcessRefreshBlockjobs(virQEMUDriverPtr driver, - virDomainObjPtr vm) +qemuProcessRefreshBlockjobs(virDomainObjPtr vm) { qemuDomainObjPrivatePtr priv = vm->privateData; if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) - return qemuBlockJobRefreshJobs(driver, vm); + return qemuBlockJobRefreshJobs(vm); else - return qemuProcessRefreshLegacyBlockjobs(driver, vm); + return qemuProcessRefreshLegacyBlockjobs(vm); } @@ -8055,7 +8033,7 @@ qemuProcessReconnect(void *opaque) cfg = virQEMUDriverGetConfig(driver); priv = obj->privateData; - if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(obj, QEMU_JOB_MODIFY) < 0) goto error; jobStarted = true; @@ -8139,7 +8117,7 @@ qemuProcessReconnect(void *opaque) goto error; } - if (qemuProcessUpdateState(driver, obj) < 0) + if (qemuProcessUpdateState(obj) < 0) goto error; state = virDomainObjGetState(obj, &reason); @@ -8188,12 +8166,12 @@ qemuProcessReconnect(void *opaque) if (qemuProcessRefreshCPU(driver, obj) < 0) goto error; - if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0) + if (qemuDomainRefreshVcpuInfo(obj, QEMU_ASYNC_JOB_NONE, true) < 0) goto error; qemuDomainVcpuPersistOrder(obj->def); - if (qemuProcessDetectIOThreadPIDs(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessDetectIOThreadPIDs(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0) @@ -8203,32 +8181,32 @@ qemuProcessReconnect(void *opaque) qemuProcessFiltersInstantiate(obj->def); - if (qemuProcessRefreshDisks(driver, obj, 
QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshDisks(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) && - qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + qemuBlockNodeNamesDetect(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; /* If querying of guest's RTC failed, report error, but do not kill the domain. */ - qemuRefreshRTC(driver, obj); + qemuRefreshRTC(obj); - if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + if (qemuProcessRefreshBalloonState(obj, QEMU_ASYNC_JOB_NONE) < 0) goto error; if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0) goto error; - if (qemuProcessRefreshBlockjobs(driver, obj) < 0) + if (qemuProcessRefreshBlockjobs(obj) < 0) goto error; if (qemuProcessUpdateDevices(driver, obj) < 0) goto error; - if (qemuRefreshPRManagerState(driver, obj) < 0) + if (qemuRefreshPRManagerState(obj) < 0) goto error; qemuProcessReconnectCheckMemAliasOrderMismatch(obj); @@ -8282,7 +8260,7 @@ qemuProcessReconnect(void *opaque) if (jobStarted) { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactive(driver, obj); - qemuDomainObjEndJob(driver, obj); + qemuDomainObjEndJob(obj); } else { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactiveJob(driver, obj); diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index 125508f9fe..ddb42611c7 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -66,12 +66,10 @@ qemuProcessIncomingDefPtr qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps, const char *path); void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc); -int qemuProcessBeginJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessBeginJob(virDomainObjPtr vm, virDomainJobOperation operation, unsigned long apiFlags); -void qemuProcessEndJob(virQEMUDriverPtr driver, - virDomainObjPtr vm); +void qemuProcessEndJob(virDomainObjPtr vm); typedef enum { VIR_QEMU_PROCESS_START_COLD = 1 << 0, @@ -145,8 +143,7 @@ typedef enum { VIR_QEMU_PROCESS_STOP_NO_RELABEL = 1 << 1, } qemuProcessStopFlags; -int qemuProcessBeginStopJob(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessBeginStopJob(virDomainObjPtr vm, qemuDomainJob job, bool forceKill); void qemuProcessStop(virQEMUDriverPtr driver, @@ -195,12 +192,10 @@ int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); -int qemuProcessRefreshBalloonState(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessRefreshBalloonState(virDomainObjPtr vm, int asyncJob); -int qemuProcessRefreshDisks(virQEMUDriverPtr driver, - virDomainObjPtr vm, +int qemuProcessRefreshDisks(virDomainObjPtr vm, qemuDomainAsyncJob asyncJob); int qemuProcessStartManagedPRDaemon(virDomainObjPtr vm); diff --git a/tests/qemuhotplugtest.c b/tests/qemuhotplugtest.c index 1e18820a2b..b95335eca7 100644 --- a/tests/qemuhotplugtest.c +++ b/tests/qemuhotplugtest.c @@ -457,7 +457,7 @@ testQemuHotplugCpuPrepare(const char *test, priv->mon = qemuMonitorTestGetMonitor(data->mon); virObjectUnlock(priv->mon); - if (qemuDomainRefreshVcpuInfo(&driver, data->vm, 0, false) < 0) + if (qemuDomainRefreshVcpuInfo(data->vm, 0, false) < 0) goto error; return data; -- 2.25.1

On Tue, Aug 04, 2020 at 08:06:45PM +0530, Prathamesh Chavan wrote:
The function `qemuDomainObjSaveStatus` required access to `virQEMUDriverPtr`. To make jobs hypervisor-agnostic, we remove this function and replace it with a callback function from `qemuDomainJob`.
Removal of `virQEMUDriverPtr` as a parameter resulted in its removal from the functions where it was passed. All such references were removed as the variable was no longer required.
Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_backup.c | 41 +- src/qemu/qemu_backup.h | 3 +- src/qemu/qemu_block.c | 45 +- src/qemu/qemu_block.h | 6 +- src/qemu/qemu_blockjob.c | 45 +- src/qemu/qemu_blockjob.h | 3 +- src/qemu/qemu_checkpoint.c | 29 +- src/qemu/qemu_domain.c | 78 ++- src/qemu/qemu_domain.h | 24 +- src/qemu/qemu_domainjob.c | 50 +- src/qemu/qemu_domainjob.h | 29 +- src/qemu/qemu_driver.c | 848 ++++++++++++++----------------- src/qemu/qemu_hotplug.c | 319 ++++++------ src/qemu/qemu_hotplug.h | 30 +- src/qemu/qemu_migration.c | 315 +++++------- src/qemu/qemu_migration.h | 12 +- src/qemu/qemu_migration_cookie.c | 7 +- src/qemu/qemu_migration_params.c | 48 +- src/qemu/qemu_migration_params.h | 15 +- src/qemu/qemu_process.c | 258 +++++----- src/qemu/qemu_process.h | 15 +- tests/qemuhotplugtest.c | 2 +- 22 files changed, 986 insertions(+), 1236 deletions(-)
Hi, I'm sorry for the delay, but I spent a while thinking about other approaches to achieve the same thing I'm commenting on below. I had to verify every single idea by debugging libvirt so that I would not propose something that was impossible to do, and by doing that, I realized a very interesting circular data reference we have: (QEMU)driver->xmlopt->config.priv->(QEMU)driver ...
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 677fa7ea91..d7a944a886 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -634,6 +634,7 @@ static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .allocJobPrivate = qemuJobAllocPrivate, .freeJobPrivate = qemuJobFreePrivate, .resetJobPrivate = qemuJobResetPrivate, + .saveStatus = qemuDomainSaveStatus, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, .setJobInfoOperation =qemuDomainJobInfoSetOperation,
Okay, ^this does the job, it works, but I would call it the easy way out. The qemuPrivateJobCallbacks structure hints that it contains callbacks specific to job handling, to which qemuDomainSaveStatus is simply not related at all. It just so happens that we have to save the domain status basically every time we're doing something to the VM. Structurally, I see 2 ways to achieve the same code extraction properly. First, having another structure for callbacks which would nest the existing qemuPrivateJobCallbacks, IOW:

struct _qemuDomainObjPrivateCallbacks {
    /* generic callbacks that we can't really categorize */
    qemuDomainObjPrivateSaveStatus saveStatus;

    /* Job related callbacks */
    qemuDomainObjPrivateJobCallbacks jobcb;
};

We'd then pass ^this structure instead of the qemuDomainObjPrivateJobCallbacks one at the relevant places. I don't like ^this solution that much either, but I wanted to mention it.

I think what we need to do instead is to look at what qemuDomainSaveStatus or qemuDomainObjSaveStatus really need. They need to access the driver and its config, that's it. In that perspective it relates to the virDomainObj's private data. Specifically for qemuDomainObjSaveStatus:

    ... if (virDomainObjIsActive(obj)) ...

^This check can easily be extracted to the virDomainObjSave function; there's no reason why it should be specific to QEMU only.

    ... if (virDomainObjSave(obj, driver->xmlopt, cfg->stateDir) < 0) ...

^This is the thing we need to call from the hypervisor-agnostic code, except we don't have @driver (note that for example libxl doesn't have @driver as part of the virDomainObj's private data). Considering the above, we need a generic wrapper over virDomainObjSave, let's call it virDomainDriverObjSave:

void
virDomainDriverObjSave(virDomainObjPtr obj)
{
    return obj->privateDataCallbacks.saveStatus(obj);
}

struct _virDomainObj {
    ...
    void *privateData;
    void (*privateDataFreeFunc) (void *);
    virDomainObjSaveStatusCallbackPtr saveStatus;    <<<<<<<<<
    ...
};

The saveStatus callback would then have to live inside xmlopt callbacks and be copied over in virDomainObjNew (just like we copy the free callback). This is far from ideal, as it involves @xmlopt, which we should not be interacting with, but we're already abusing @xmlopt in so many places that it's such an integral part of libvirt that refactoring how and where we use @xmlopt (see the circular referencing above) is IMO beyond even a standalone GSoC project.

Alternatively:

struct _virDomainObj {
    ...
    void *privateData;
    virDomainObjPrivateDataCallbacks cb;
    ...
};

and then

struct _virDomainObjPrivateDataCallbacks {
    void (*free) (void *);
    void (*saveStatus) (virDomainObjPtr);
};

However, ^this would break the consistency we use for freeing privateData in object Dispose functions, for example for StoragePools, Volumes, Domains, etc. And since I am a fan of consistency, I would not favour ^this alternative.

Erik
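To make the second proposal above a bit more concrete, here is a minimal sketch of what the QEMU side of such a saveStatus callback could look like. This is not code from the series: the function name qemuDomainObjSaveStatusCallback is an assumption, and it assumes the virDomainObjIsActive() check has already been moved into virDomainObjSave() as suggested; only virDomainObjSave(), virQEMUDriverGetConfig() and the existing private-data layout come from this thread.

/* Hypothetical per-driver saveStatus callback: the QEMU-specific part only
 * resolves @driver and @cfg from its private data and defers to the generic
 * virDomainObjSave(); the "is the domain active" check is assumed to live
 * in virDomainObjSave() itself. */
static void
qemuDomainObjSaveStatusCallback(virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);

    if (virDomainObjSave(obj, priv->driver->xmlopt, cfg->stateDir) < 0)
        VIR_WARN("Failed to save status of vm %s", obj->def->name);
}

Hypervisor-agnostic code such as the future job module would then only ever call the generic virDomainDriverObjSave(obj) wrapper and never see virQEMUDriverPtr.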

I have decided to go with the nested callback structure. I'll be posting a patch related to the same soon. On Wed, Aug 12, 2020 at 5:35 PM Erik Skultety <eskultet@redhat.com> wrote:
[...]
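As a rough illustration of the chosen direction, the nested-callbacks registration in qemu_domain.c could end up looking something like the sketch below. This is not code from the series: the typedef name qemuDomainObjPrivateCallbacks, the variable name qemuPrivateCallbacks and the exact initializer layout are assumptions, and only a subset of the existing job callbacks is shown.

/* Hypothetical top-level private-data callback table nesting the existing
 * job callbacks, following the struct _qemuDomainObjPrivateCallbacks
 * outline from the review above. */
static qemuDomainObjPrivateCallbacks qemuPrivateCallbacks = {
    /* generic callback that is not job-specific */
    .saveStatus = qemuDomainSaveStatus,

    /* existing job-handling callbacks, kept together in a nested struct */
    .jobcb = {
        .allocJobPrivate = qemuJobAllocPrivate,
        .freeJobPrivate = qemuJobFreePrivate,
        .resetJobPrivate = qemuJobResetPrivate,
        .formatJob = qemuDomainFormatJobPrivate,
        .parseJob = qemuDomainParseJobPrivate,
        .setJobInfoOperation = qemuDomainJobInfoSetOperation,
    },
};

Per the review, this whole structure would then be passed instead of the job-callbacks one at the relevant places.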

Functions `qemuDomainRemoveInactiveJob` and `qemuDomainRemoveInactiveJobLocked` had their declaration mispalced in `qemu_domainjob` and were moved to `qemu_domain`. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_domain.h | 6 ++++++ src/qemu/qemu_domainjob.h | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index a247629c17..2cc10a30a4 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -540,6 +540,12 @@ struct _qemuDomainJobPrivate { }; +void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, + virDomainObjPtr vm); + +void qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, + virDomainObjPtr vm); + void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver, virDomainObjPtr vm); diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index ee71ae77e8..437a27f74a 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -197,12 +197,6 @@ int qemuDomainObjRestoreJob(virDomainObjPtr obj, void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj); void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj); -void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, - virDomainObjPtr vm); - -void qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, - virDomainObjPtr vm); - bool qemuDomainTrackJob(qemuDomainJob job); void qemuDomainObjFreeJob(qemuDomainJobObjPtr job); -- 2.25.1

On Tue, Aug 04, 2020 at 08:06:46PM +0530, Prathamesh Chavan wrote:
Functions `qemuDomainRemoveInactiveJob` and `qemuDomainRemoveInactiveJobLocked` had their declaration mispalced in `qemu_domainjob` and were moved to
s/mispalced/misplaced
`qemu_domain`.
Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- Reviewed-by: Erik Skultety <eskultet@redhat.com>

To remove the dependency of functions on accessing the `privateData` of qemu-domain, we introduce this callback function so that functions get exactly what they need. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_domainjob.c | 4 ++-- src/qemu/qemu_domainjob.h | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index d7a944a886..2e16c8e5fe 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -629,12 +629,20 @@ qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt, return 0; } +static virDomainXMLOptionPtr +qemuGetDomainXMLOptionPtr(virDomainObjPtr vm) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + return priv->driver->xmlopt; + +} static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .allocJobPrivate = qemuJobAllocPrivate, .freeJobPrivate = qemuJobFreePrivate, .resetJobPrivate = qemuJobResetPrivate, .saveStatus = qemuDomainSaveStatus, + .getDomainXMLOptionPtr = qemuGetDomainXMLOptionPtr, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, .setJobInfoOperation = qemuDomainJobInfoSetOperation, diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 19c847dffc..3eff45fd17 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -765,7 +765,7 @@ qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, if (diskPriv->migrSource && qemuDomainObjPrivateXMLFormatNBDMigrationSource(&childBuf, diskPriv->migrSource, - priv->driver->xmlopt) < 0) + priv->job.cb->getDomainXMLOptionPtr(vm)) < 0) return -1; virXMLFormatElement(buf, "disk", &attrBuf, &childBuf); @@ -892,7 +892,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, if (qemuDomainObjPrivateXMLParseJobNBDSource(nodes[i], ctxt, disk, - priv->driver->xmlopt) < 0) + priv->job.cb->getDomainXMLOptionPtr(vm)) < 0) return -1; } } diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 437a27f74a..220257775d 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -105,6 +105,7 @@ typedef void *(*qemuDomainObjPrivateJobAlloc)(void); typedef void (*qemuDomainObjPrivateJobFree)(void *); typedef void (*qemuDomainObjPrivateJobReset)(void *); typedef void (*qemuSaveStatus)(virDomainObjPtr); +typedef virDomainXMLOptionPtr (*qemuGetDomainXmlOptionPtr)(virDomainObjPtr); typedef int (*qemuDomainObjPrivateJobFormat)(virBufferPtr, qemuDomainJobObjPtr); typedef int (*qemuDomainObjPrivateJobParse)(xmlXPathContextPtr, @@ -121,6 +122,7 @@ struct _qemuDomainObjPrivateJobCallbacks { qemuDomainObjPrivateJobFree freeJobPrivate; qemuDomainObjPrivateJobReset resetJobPrivate; qemuSaveStatus saveStatus; + qemuGetDomainXmlOptionPtr getDomainXMLOptionPtr; qemuDomainObjPrivateJobFormat formatJob; qemuDomainObjPrivateJobParse parseJob; qemuDomainObjJobInfoSetOperation setJobInfoOperation; -- 2.25.1

On Tue, Aug 04, 2020 at 08:06:47PM +0530, Prathamesh Chavan wrote:
To remove the dependency of functions on accessing the `privateData` of qemu-domain, we introduce this callback function so that functions get exactly what they need.
Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_domainjob.c | 4 ++-- src/qemu/qemu_domainjob.h | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index d7a944a886..2e16c8e5fe 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -629,12 +629,20 @@ qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt, return 0; }
+static virDomainXMLOptionPtr +qemuGetDomainXMLOptionPtr(virDomainObjPtr vm) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + return priv->driver->xmlopt; + +}
static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = { .allocJobPrivate = qemuJobAllocPrivate, .freeJobPrivate = qemuJobFreePrivate, .resetJobPrivate = qemuJobResetPrivate, .saveStatus = qemuDomainSaveStatus, + .getDomainXMLOptionPtr = qemuGetDomainXMLOptionPtr, .formatJob = qemuDomainFormatJobPrivate, .parseJob = qemuDomainParseJobPrivate, .setJobInfoOperation = qemuDomainJobInfoSetOperation, diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 19c847dffc..3eff45fd17 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -765,7 +765,7 @@ qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, if (diskPriv->migrSource && qemuDomainObjPrivateXMLFormatNBDMigrationSource(&childBuf, diskPriv->migrSource, - priv->driver->xmlopt) < 0) + priv->job.cb->getDomainXMLOptionPtr(vm)) < 0)
NBD migration is very much QEMU-specific at the moment, so if you move qemuDomainObjPrivateXMLFormatNBDMigrationSource and similar functions out of the qemu_domainjob module, which we're planning on using as a base for the hypervisor-agnostic job handling module, you won't need this patch. Erik

References to `qemuDomainObjPrivatePtr` in qemu_domainjob were removed as it is a qemu-hypervisor specific pointer. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/qemu/qemu_backup.c | 15 +- src/qemu/qemu_checkpoint.c | 12 +- src/qemu/qemu_domain.c | 20 +- src/qemu/qemu_domainjob.c | 222 ++++++-------- src/qemu/qemu_domainjob.h | 34 ++- src/qemu/qemu_driver.c | 602 ++++++++++++++++++++++--------------- src/qemu/qemu_migration.c | 23 +- src/qemu/qemu_process.c | 42 +-- 8 files changed, 548 insertions(+), 422 deletions(-) diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index 7e5926250a..4e606c252f 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -572,7 +572,7 @@ qemuBackupJobTerminate(virDomainObjPtr vm, virDomainBackupDefFree(priv->backup); priv->backup = NULL; - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); } @@ -740,13 +740,14 @@ qemuBackupBegin(virDomainObjPtr vm, * infrastructure for async jobs. We'll allow standard modify-type jobs * as the interlocking of conflicting operations is handled on the block * job level */ - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_BACKUP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_BACKUP, VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0) return -1; - qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MODIFY))); + qemuDomainObjSetAsyncJobMask(&priv->job, + (QEMU_JOB_DEFAULT_MASK | + JOB_MASK(QEMU_JOB_SUSPEND) | + JOB_MASK(QEMU_JOB_MODIFY))); jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; if (!virDomainObjIsActive(vm)) { @@ -877,9 +878,9 @@ qemuBackupBegin(virDomainObjPtr vm, def = g_steal_pointer(&priv->backup); if (ret == 0) - qemuDomainObjReleaseAsyncJob(vm); + qemuDomainObjReleaseAsyncJob(&priv->job); else - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); return ret; } diff --git a/src/qemu/qemu_checkpoint.c b/src/qemu/qemu_checkpoint.c index b90410aa20..e9547da555 100644 --- a/src/qemu/qemu_checkpoint.c +++ b/src/qemu/qemu_checkpoint.c @@ -539,7 +539,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, /* Unlike snapshots, the RNG schema already ensured a sane filename. */ /* We are going to modify the domain below. 
*/ - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return NULL; if (redefine) { @@ -561,7 +561,7 @@ qemuCheckpointCreateXML(virDomainPtr domain, checkpoint = virGetDomainCheckpoint(domain, chk->def->name); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return checkpoint; } @@ -588,7 +588,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, size_t i; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -697,7 +697,7 @@ qemuCheckpointGetXMLDescUpdateSize(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -781,7 +781,7 @@ qemuCheckpointDelete(virDomainObjPtr vm, VIR_DOMAIN_CHECKPOINT_DELETE_METADATA_ONLY | VIR_DOMAIN_CHECKPOINT_DELETE_CHILDREN_ONLY, -1); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (!metadata_only) { @@ -849,6 +849,6 @@ qemuCheckpointDelete(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 2e16c8e5fe..46069e02bc 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -2819,7 +2819,7 @@ qemuDomainObjPrivateXMLFormat(virBufferPtr buf, if (priv->lockState) virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState); - if (qemuDomainObjPrivateXMLFormatJob(buf, vm) < 0) + if (qemuDomainObjPrivateXMLFormatJob(buf, vm, &priv->job) < 0) return -1; if (priv->fakeReboot) @@ -3478,7 +3478,7 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, priv->lockState = virXPathString("string(./lockstate)", ctxt); - if (qemuDomainObjPrivateXMLParseJob(vm, ctxt) < 0) + if (qemuDomainObjPrivateXMLParseJob(vm, ctxt, &priv->job) < 0) goto error; priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1; @@ -5921,12 +5921,12 @@ qemuDomainObjEnterMonitorInternal(virDomainObjPtr obj, if (asyncJob != QEMU_ASYNC_JOB_NONE) { int ret; - if ((ret = qemuDomainObjBeginNestedJob(obj, asyncJob)) < 0) + if ((ret = qemuDomainObjBeginNestedJob(obj, &priv->job, asyncJob)) < 0) return ret; if (!virDomainObjIsActive(obj)) { virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("domain is no longer running")); - qemuDomainObjEndJob(obj); + qemuDomainObjEndJob(obj, &priv->job); return -1; } } else if (priv->job.asyncOwner == virThreadSelfID()) { @@ -5971,7 +5971,7 @@ qemuDomainObjExitMonitorInternal(virDomainObjPtr obj) priv->mon = NULL; if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjEndJob(obj); + qemuDomainObjEndJob(obj, &priv->job); } void qemuDomainObjEnterMonitor(virDomainObjPtr obj) @@ -7113,13 +7113,14 @@ qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver, virDomainObjPtr vm) { bool haveJob; + qemuDomainObjPrivatePtr priv = vm->privateData; - haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; + haveJob = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactive(driver, vm); if (haveJob) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -7134,13 +7135,14 @@ qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver, virDomainObjPtr vm) { bool haveJob; + qemuDomainObjPrivatePtr priv = vm->privateData; - haveJob = qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) >= 0; + haveJob = 
qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) >= 0; qemuDomainRemoveInactiveLocked(driver, vm); if (haveJob) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 3eff45fd17..ccbb7866b3 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -18,7 +18,6 @@ #include <config.h> -#include "qemu_domain.h" #include "qemu_migration.h" #include "qemu_domainjob.h" #include "viralloc.h" @@ -178,26 +177,24 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObjPtr job) } int -qemuDomainObjRestoreJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job) -{ - qemuDomainObjPrivatePtr priv = obj->privateData; +qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, + qemuDomainJobObjPtr oldJob) +{ + memset(oldJob, 0, sizeof(*oldJob)); + oldJob->active = job->active; + oldJob->owner = job->owner; + oldJob->asyncJob = job->asyncJob; + oldJob->asyncOwner = job->asyncOwner; + oldJob->phase = job->phase; + oldJob->privateData = g_steal_pointer(&job->privateData); + oldJob->apiFlags = job->apiFlags; - memset(job, 0, sizeof(*job)); - job->active = priv->job.active; - job->owner = priv->job.owner; - job->asyncJob = priv->job.asyncJob; - job->asyncOwner = priv->job.asyncOwner; - job->phase = priv->job.phase; - job->privateData = g_steal_pointer(&priv->job.privateData); - job->apiFlags = priv->job.apiFlags; - - if (!(priv->job.privateData = priv->job.cb->allocJobPrivate())) + if (!(job->privateData = job->cb->allocJobPrivate())) return -1; - job->cb = priv->job.cb; + oldJob->cb = job->cb; - qemuDomainObjResetJob(&priv->job); - qemuDomainObjResetAsyncJob(&priv->job); + qemuDomainObjResetJob(job); + qemuDomainObjResetAsyncJob(job); return 0; } @@ -220,65 +217,61 @@ qemuDomainTrackJob(qemuDomainJob job) void qemuDomainObjSetJobPhase(virDomainObjPtr obj, + qemuDomainJobObjPtr job, int phase) { - qemuDomainObjPrivatePtr priv = obj->privateData; unsigned long long me = virThreadSelfID(); - if (!priv->job.asyncJob) + if (!job->asyncJob) return; VIR_DEBUG("Setting '%s' phase to '%s'", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase)); + qemuDomainAsyncJobTypeToString(job->asyncJob), + qemuDomainAsyncJobPhaseToString(job->asyncJob, phase)); - if (priv->job.asyncOwner && me != priv->job.asyncOwner) { + if (job->asyncOwner && me != job->asyncOwner) { VIR_WARN("'%s' async job is owned by thread %llu", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - priv->job.asyncOwner); + qemuDomainAsyncJobTypeToString(job->asyncJob), + job->asyncOwner); } - priv->job.phase = phase; - priv->job.asyncOwner = me; - priv->job.cb->saveStatus(obj); + job->phase = phase; + job->asyncOwner = me; + job->cb->saveStatus(obj); } void -qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, +qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job, unsigned long long allowedJobs) { - qemuDomainObjPrivatePtr priv = obj->privateData; - - if (!priv->job.asyncJob) + if (!job->asyncJob) return; - priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY); + job->mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY); } void -qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj) +qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - if (priv->job.active == QEMU_JOB_ASYNC_NESTED) - qemuDomainObjResetJob(&priv->job); - qemuDomainObjResetAsyncJob(&priv->job); - priv->job.cb->saveStatus(obj); + if (job->active == QEMU_JOB_ASYNC_NESTED) + 
qemuDomainObjResetJob(job); + qemuDomainObjResetAsyncJob(job); + job->cb->saveStatus(obj); } void -qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj) +qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - VIR_DEBUG("Releasing ownership of '%s' async job", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + qemuDomainAsyncJobTypeToString(job->asyncJob)); - if (priv->job.asyncOwner != virThreadSelfID()) { + if (job->asyncOwner != virThreadSelfID()) { VIR_WARN("'%s' async job is owned by thread %llu", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), - priv->job.asyncOwner); + qemuDomainAsyncJobTypeToString(job->asyncJob), + job->asyncOwner); } - priv->job.asyncOwner = 0; + job->asyncOwner = 0; } static bool @@ -542,11 +535,9 @@ qemuDomainObjBeginJobInternal(virDomainObjPtr obj, * Successful calls must be followed by EndJob eventually */ int qemuDomainObjBeginJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - if (qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, false) < 0) @@ -564,51 +555,45 @@ int qemuDomainObjBeginJob(virDomainObjPtr obj, */ int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAgentJob agentJob) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - return qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_NONE, agentJob, QEMU_ASYNC_JOB_NONE, false); } int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - if (qemuDomainObjBeginJobInternal(obj, jobObj, QEMU_JOB_ASYNC, QEMU_AGENT_JOB_NONE, asyncJob, false) < 0) return -1; - priv->job.cb->setJobInfoOperation(jobObj, operation); - priv->job.apiFlags = apiFlags; + jobObj->cb->setJobInfoOperation(jobObj, operation); + jobObj->apiFlags = apiFlags; return 0; } int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - - if (asyncJob != priv->job.asyncJob) { + if (asyncJob != jobObj->asyncJob) { virReportError(VIR_ERR_INTERNAL_ERROR, _("unexpected async job %d type expected %d"), - asyncJob, priv->job.asyncJob); + asyncJob, jobObj->asyncJob); return -1; } - if (priv->job.asyncOwner != virThreadSelfID()) { + if (jobObj->asyncOwner != virThreadSelfID()) { VIR_WARN("This thread doesn't seem to be the async job owner: %llu", - priv->job.asyncOwner); + jobObj->asyncOwner); } return qemuDomainObjBeginJobInternal(obj, jobObj, @@ -622,6 +607,7 @@ qemuDomainObjBeginNestedJob(virDomainObjPtr obj, * qemuDomainObjBeginJobNowait: * * @obj: domain object + * @jobObj: qemuDomainJobObjPtr * @job: qemuDomainJob to start * * Acquires job for a domain object which must be locked before @@ -632,11 +618,9 @@ qemuDomainObjBeginNestedJob(virDomainObjPtr obj, */ int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - return qemuDomainObjBeginJobInternal(obj, jobObj, job, QEMU_AGENT_JOB_NONE, QEMU_ASYNC_JOB_NONE, true); @@ -649,74 +633,68 @@ 
qemuDomainObjBeginJobNowait(virDomainObjPtr obj, * earlier qemuDomainBeginJob() call */ void -qemuDomainObjEndJob(virDomainObjPtr obj) +qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - qemuDomainJob job = priv->job.active; + qemuDomainJob job = jobObj->active; jobObj->jobs_queued--; VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)", qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetJob(&priv->job); + qemuDomainObjResetJob(jobObj); if (qemuDomainTrackJob(job)) jobObj->cb->saveStatus(obj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. */ - virCondBroadcast(&priv->job.cond); + virCondBroadcast(&jobObj->cond); } void -qemuDomainObjEndAgentJob(virDomainObjPtr obj) +qemuDomainObjEndAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - qemuDomainAgentJob agentJob = priv->job.agentActive; + qemuDomainAgentJob agentJob = jobObj->agentActive; jobObj->jobs_queued--; VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)", qemuDomainAgentJobTypeToString(agentJob), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetAgentJob(&priv->job); + qemuDomainObjResetAgentJob(jobObj); /* We indeed need to wake up ALL threads waiting because * grabbing a job requires checking more variables. */ - virCondBroadcast(&priv->job.cond); + virCondBroadcast(&jobObj->cond); } void -qemuDomainObjEndAsyncJob(virDomainObjPtr obj) +qemuDomainObjEndAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = obj->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; - jobObj->jobs_queued--; VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(jobObj->asyncJob), obj, obj->def->name); - qemuDomainObjResetAsyncJob(&priv->job); + qemuDomainObjResetAsyncJob(jobObj); jobObj->cb->saveStatus(obj); - virCondBroadcast(&priv->job.asyncCond); + virCondBroadcast(&jobObj->asyncCond); } void -qemuDomainObjAbortAsyncJob(virDomainObjPtr obj) +qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = obj->privateData; - VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)", - qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + qemuDomainAsyncJobTypeToString(job->asyncJob), obj, obj->def->name); - priv->job.abortJob = true; + job->abortJob = true; virDomainObjBroadcast(obj); } @@ -746,9 +724,9 @@ qemuDomainObjPrivateXMLFormatNBDMigrationSource(virBufferPtr buf, static int qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, - virDomainObjPtr vm) + virDomainObjPtr vm, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = vm->privateData; size_t i; virDomainDiskDefPtr disk; qemuDomainDiskPrivatePtr diskPriv; @@ -765,7 +743,7 @@ qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, if (diskPriv->migrSource && qemuDomainObjPrivateXMLFormatNBDMigrationSource(&childBuf, diskPriv->migrSource, - priv->job.cb->getDomainXMLOptionPtr(vm)) < 0) + job->cb->getDomainXMLOptionPtr(vm)) < 0) return -1; virXMLFormatElement(buf, "disk", &attrBuf, 
&childBuf); @@ -776,36 +754,35 @@ qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf, int qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm) + virDomainObjPtr vm, + qemuDomainJobObjPtr jobObj) { - qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobObjPtr jobObj = &priv->job; g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER; g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf); - qemuDomainJob job = priv->job.active; + qemuDomainJob job = jobObj->active; if (!qemuDomainTrackJob(job)) job = QEMU_JOB_NONE; if (job == QEMU_JOB_NONE && - priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) + jobObj->asyncJob == QEMU_ASYNC_JOB_NONE) return 0; virBufferAsprintf(&attrBuf, " type='%s' async='%s'", qemuDomainJobTypeToString(job), - qemuDomainAsyncJobTypeToString(priv->job.asyncJob)); + qemuDomainAsyncJobTypeToString(jobObj->asyncJob)); - if (priv->job.phase) { + if (jobObj->phase) { virBufferAsprintf(&attrBuf, " phase='%s'", - qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, - priv->job.phase)); + qemuDomainAsyncJobPhaseToString(jobObj->asyncJob, + jobObj->phase)); } - if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE) - virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags); + if (jobObj->asyncJob != QEMU_ASYNC_JOB_NONE) + virBufferAsprintf(&attrBuf, " flags='0x%lx'", jobObj->apiFlags); - if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && - qemuDomainObjPrivateXMLFormatNBDMigration(&childBuf, vm) < 0) + if (jobObj->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && + qemuDomainObjPrivateXMLFormatNBDMigration(&childBuf, vm, jobObj) < 0) return -1; if (jobObj->cb->formatJob(&childBuf, jobObj) < 0) @@ -866,9 +843,9 @@ qemuDomainObjPrivateXMLParseJobNBDSource(xmlNodePtr node, static int qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, - xmlXPathContextPtr ctxt) + xmlXPathContextPtr ctxt, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = vm->privateData; g_autofree xmlNodePtr *nodes = NULL; size_t i; int n; @@ -877,7 +854,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, return -1; if (n > 0) { - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { + if (job->asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { VIR_WARN("Found disks marked for migration but we were not " "migrating"); n = 0; @@ -892,7 +869,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, if (qemuDomainObjPrivateXMLParseJobNBDSource(nodes[i], ctxt, disk, - priv->job.cb->getDomainXMLOptionPtr(vm)) < 0) + job->cb->getDomainXMLOptionPtr(vm)) < 0) return -1; } } @@ -903,10 +880,9 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm, int qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - xmlXPathContextPtr ctxt) + xmlXPathContextPtr ctxt, + qemuDomainJobObjPtr job) { - qemuDomainObjPrivatePtr priv = vm->privateData; - qemuDomainJobObjPtr job = &priv->job; VIR_XPATH_NODE_AUTORESTORE(ctxt); g_autofree char *tmp = NULL; @@ -922,7 +898,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, return -1; } VIR_FREE(tmp); - priv->job.active = type; + job->active = type; } if ((tmp = virXPathString("string(@async)", ctxt))) { @@ -934,11 +910,11 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, return -1; } VIR_FREE(tmp); - priv->job.asyncJob = async; + job->asyncJob = async; if ((tmp = virXPathString("string(@phase)", ctxt))) { - priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp); - if (priv->job.phase < 0) { + job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp); + if (job->phase < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("Unknown 
job phase %s"), tmp); return -1; @@ -947,12 +923,12 @@ qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, } } - if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) { + if (virXPathULongHex("string(@flags)", ctxt, &job->apiFlags) == -2) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid job flags")); return -1; } - if (qemuDomainObjPrivateXMLParseJobNBD(vm, ctxt) < 0) + if (qemuDomainObjPrivateXMLParseJobNBD(vm, ctxt, job) < 0) return -1; if (job->cb->parseJob(ctxt, job) < 0) diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index 220257775d..bf35efc444 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -169,35 +169,45 @@ int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, const char *phase); int qemuDomainObjBeginJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAgentJob agentJob) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob, virDomainJobOperation operation, unsigned long apiFlags) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginNestedJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainAsyncJob asyncJob) G_GNUC_WARN_UNUSED_RESULT; int qemuDomainObjBeginJobNowait(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj, qemuDomainJob job) G_GNUC_WARN_UNUSED_RESULT; -void qemuDomainObjEndJob(virDomainObjPtr obj); -void qemuDomainObjEndAgentJob(virDomainObjPtr obj); -void qemuDomainObjEndAsyncJob(virDomainObjPtr obj); -void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj); +void qemuDomainObjEndJob(virDomainObjPtr obj, qemuDomainJobObjPtr jobObj); +void qemuDomainObjEndAgentJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj); +void qemuDomainObjEndAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr jobObj); +void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job); void qemuDomainObjSetJobPhase(virDomainObjPtr obj, + qemuDomainJobObjPtr job, int phase); -void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj, +void qemuDomainObjSetAsyncJobMask(qemuDomainJobObjPtr job, unsigned long long allowedJobs); -int qemuDomainObjRestoreJob(virDomainObjPtr obj, - qemuDomainJobObjPtr job); -void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj); -void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj); +int qemuDomainObjRestoreJob(qemuDomainJobObjPtr job, + qemuDomainJobObjPtr oldJob); +void qemuDomainObjDiscardAsyncJob(virDomainObjPtr obj, + qemuDomainJobObjPtr job); +void qemuDomainObjReleaseAsyncJob(qemuDomainJobObjPtr job); bool qemuDomainTrackJob(qemuDomainJob job); @@ -212,8 +222,10 @@ bool qemuDomainJobAllowed(qemuDomainJobObjPtr jobs, qemuDomainJob newJob); int qemuDomainObjPrivateXMLFormatJob(virBufferPtr buf, - virDomainObjPtr vm); + virDomainObjPtr vm, + qemuDomainJobObjPtr jobObj); int qemuDomainObjPrivateXMLParseJob(virDomainObjPtr vm, - xmlXPathContextPtr ctxt); + xmlXPathContextPtr ctxt, + qemuDomainJobObjPtr job); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 6623392495..721c33da2e 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -1827,7 +1827,7 @@ static int qemuDomainSuspend(virDomainPtr dom) cfg = virQEMUDriverGetConfig(driver); priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_SUSPEND) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_SUSPEND) < 0) goto cleanup; if 
(virDomainObjCheckActive(vm) < 0) @@ -1854,7 +1854,7 @@ static int qemuDomainSuspend(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -1866,6 +1866,7 @@ static int qemuDomainSuspend(virDomainPtr dom) static int qemuDomainResume(virDomainPtr dom) { virQEMUDriverPtr driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; int ret = -1; int state; @@ -1876,11 +1877,12 @@ static int qemuDomainResume(virDomainPtr dom) return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainResumeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -1912,7 +1914,7 @@ static int qemuDomainResume(virDomainPtr dom) ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -1928,10 +1930,11 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, { int ret = -1; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int agentFlag = isReboot ? QEMU_AGENT_SHUTDOWN_REBOOT : QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1949,7 +1952,7 @@ qemuDomainShutdownFlagsAgent(virQEMUDriverPtr driver, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -1964,7 +1967,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) { @@ -1980,7 +1983,7 @@ qemuDomainShutdownFlagsMonitor(virQEMUDriverPtr driver, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -2059,11 +2062,13 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, qemuAgentPtr agent; int ret = -1; int agentFlag = QEMU_AGENT_SHUTDOWN_REBOOT; + qemuDomainObjPrivatePtr priv = vm->privateData; if (!isReboot) agentFlag = QEMU_AGENT_SHUTDOWN_POWERDOWN; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (!qemuDomainAgentAvailable(vm, agentForced)) @@ -2078,7 +2083,7 @@ qemuDomainRebootAgent(virQEMUDriverPtr driver, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -2091,7 +2096,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -2104,7 +2109,7 @@ qemuDomainRebootMonitor(virQEMUDriverPtr driver, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -2177,16 +2182,17 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainResetEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if 
(qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemReset(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) @@ -2199,7 +2205,7 @@ qemuDomainReset(virDomainPtr dom, unsigned int flags) virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_CRASHED); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2284,7 +2290,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, endjob: if (ret == 0) qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2354,11 +2360,12 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetMemoryFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2420,7 +2427,6 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, } if (def) { - priv = vm->privateData; qemuDomainObjEnterMonitor(vm); r = qemuMonitorSetBalloon(priv->mon, newmem); if (qemuDomainObjExitMonitor(vm) < 0 || r < 0) @@ -2445,7 +2451,7 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2480,11 +2486,12 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetMemoryStatsPeriodEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -2530,7 +2537,7 @@ static int qemuDomainSetMemoryStatsPeriod(virDomainPtr dom, int period, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2553,7 +2560,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2565,7 +2572,7 @@ static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags) ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -2612,7 +2619,7 @@ static int qemuDomainSendKey(virDomainPtr domain, if (virDomainSendKeyEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -2624,7 +2631,7 @@ static int qemuDomainSendKey(virDomainPtr domain, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -3316,7 +3323,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - if 
(qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SAVE, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SAVE, VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0) goto cleanup; @@ -3410,7 +3417,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virErrorRestore(&save_err); } } - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); if (ret == 0) qemuDomainRemoveInactiveJob(driver, vm); @@ -3903,10 +3910,12 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainCoreDumpWithFormatEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) goto cleanup; @@ -3973,7 +3982,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, } } - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); if (ret == 0 && flags & VIR_DUMP_CRASH) qemuDomainRemoveInactiveJob(driver, vm); @@ -4023,7 +4032,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (virDomainScreenshotEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -4097,7 +4106,7 @@ qemuDomainScreenshot(virDomainPtr dom, if (unlink_tmp) unlink(tmp); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -4138,13 +4147,14 @@ processWatchdogEvent(virQEMUDriverPtr driver, g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); g_autofree char *dumpfile = getAutoDumpPath(driver, vm); unsigned int flags = VIR_DUMP_MEMORY_ONLY; + qemuDomainObjPrivatePtr priv = vm->privateData; if (!dumpfile) return; switch (action) { case VIR_DOMAIN_WATCHDOG_ACTION_DUMP: - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) { return; @@ -4172,7 +4182,7 @@ processWatchdogEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); } static int @@ -4221,7 +4231,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, bool removeInactive = false; unsigned long flags = VIR_DUMP_MEMORY_ONLY; - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_DUMP, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_DUMP, VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0) return; @@ -4287,7 +4297,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); if (removeInactive) qemuDomainRemoveInactiveJob(driver, vm); } @@ -4299,12 +4309,13 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, const char *devAlias) { g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); + qemuDomainObjPrivatePtr priv = vm->privateData; virDomainDeviceDef dev; VIR_DEBUG("Removing device %s from domain %p %s", devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4327,7 +4338,7 @@ processDeviceDeletedEvent(virQEMUDriverPtr driver, devAlias); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4542,7 +4553,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm, "from domain %p %s", 
devAlias, vm, vm->def->name); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -4624,7 +4635,7 @@ processNicRxFilterChangedEvent(virDomainObjPtr vm, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virNetDevRxFilterFree(hostFilter); @@ -4670,7 +4681,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, memset(&dev, 0, sizeof(dev)); } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4711,7 +4722,7 @@ processSerialChangedEvent(virQEMUDriverPtr driver, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4723,8 +4734,9 @@ processBlockJobEvent(virDomainObjPtr vm, { virDomainDiskDefPtr disk; g_autoptr(qemuBlockJobData) job = NULL; + qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4749,7 +4761,7 @@ processBlockJobEvent(virDomainObjPtr vm, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4757,7 +4769,9 @@ static void processJobStatusChangeEvent(virDomainObjPtr vm, qemuBlockJobDataPtr job) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + qemuDomainObjPrivatePtr priv = vm->privateData; + + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return; if (!virDomainObjIsActive(vm)) { @@ -4768,7 +4782,7 @@ processJobStatusChangeEvent(virDomainObjPtr vm, qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -4814,7 +4828,7 @@ processMonitorEOFEvent(virQEMUDriverPtr driver, endjob: qemuDomainRemoveInactive(driver, vm); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } @@ -5053,6 +5067,7 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, bool hotpluggable = !!(flags & VIR_DOMAIN_VCPU_HOTPLUGGABLE); bool useAgent = !!(flags & VIR_DOMAIN_VCPU_GUEST); int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | @@ -5063,15 +5078,18 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainSetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; if (useAgent) { - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; } else { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; } @@ -5088,9 +5106,9 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, endjob: if (useAgent) - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); else - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -5201,6 +5219,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, virBitmapPtr pcpumap = NULL; virDomainVcpuDefPtr vcpuinfo = NULL; g_autoptr(virQEMUDriverConfig) cfg = NULL; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); @@ -5210,10 +5229,12 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = 
vm->privateData; + if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5252,7 +5273,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -5337,13 +5358,14 @@ qemuDomainPinEmulator(virDomainPtr dom, if (virDomainPinEmulatorEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + priv = vm->privateData; + + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) goto endjob; - priv = vm->privateData; if (!(pcpumap = virBitmapNewData(cpumap, maplen))) goto endjob; @@ -5404,7 +5426,7 @@ qemuDomainPinEmulator(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: if (cgroup_emulator) @@ -5506,6 +5528,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) qemuAgentPtr agent; int ncpuinfo = -1; size_t i; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | @@ -5515,6 +5538,8 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainGetVcpusFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; @@ -5522,7 +5547,8 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) goto cleanup; if (flags & VIR_DOMAIN_VCPU_GUEST) { - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -5540,7 +5566,7 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); if (ncpuinfo < 0) goto cleanup; @@ -5598,14 +5624,14 @@ static int qemuDomainGetIOThreadsLive(virDomainObjPtr vm, virDomainIOThreadInfoPtr **info) { - qemuDomainObjPrivatePtr priv; + qemuDomainObjPrivatePtr priv = vm->privateData; qemuMonitorIOThreadInfoPtr *iothreads = NULL; virDomainIOThreadInfoPtr *info_ret = NULL; int niothreads = 0; size_t i; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -5614,7 +5640,6 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm, goto endjob; } - priv = vm->privateData; if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("IOThreads not supported with this binary")); @@ -5655,7 +5680,7 @@ qemuDomainGetIOThreadsLive(virDomainObjPtr vm, ret = niothreads; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: if (info_ret) { @@ -5798,7 +5823,7 @@ qemuDomainPinIOThread(virDomainPtr dom, if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -5887,7 +5912,7 @@ qemuDomainPinIOThread(virDomainPtr dom, ret = 0; endjob: - 
qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: if (cgroup_iothread) @@ -6248,7 +6273,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -6335,7 +6360,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -7680,6 +7705,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, int ret = -1; int nsnapshots; int ncheckpoints; + qemuDomainObjPrivatePtr priv; g_autoptr(virQEMUDriverConfig) cfg = NULL; g_autofree char *nvram_path = NULL; @@ -7699,12 +7725,14 @@ qemuDomainUndefineFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + cfg = virQEMUDriverGetConfig(driver); if (virDomainUndefineFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!vm->persistent) { @@ -7800,7 +7828,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8762,6 +8790,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; int ret = -1; virNWFilterReadLockFilterUpdates(); @@ -8769,10 +8798,12 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainAttachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8784,7 +8815,7 @@ qemuDomainAttachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -8829,7 +8860,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, if (virDomainUpdateDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -8898,7 +8929,7 @@ static int qemuDomainUpdateDeviceFlags(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainDefFree(vmdef); @@ -9076,15 +9107,18 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; int ret = -1; if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainDetachDeviceFlagsEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -9096,7 +9130,7 @@ qemuDomainDetachDeviceFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9111,15 +9145,18 @@ 
qemuDomainDetachDeviceAlias(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; int ret = -1; if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainDetachDeviceAliasEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjUpdateModificationImpact(vm, &flags) < 0) @@ -9131,7 +9168,7 @@ qemuDomainDetachDeviceAlias(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9174,11 +9211,13 @@ static int qemuDomainSetAutostart(virDomainPtr dom, g_autofree char *autostartLink = NULL; int ret = -1; g_autoptr(virQEMUDriverConfig) cfg = NULL; + qemuDomainObjPrivatePtr priv; if (!(vm = qemuDomainObjFromDomain(dom))) return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSetAutostartEnsureACL(dom->conn, vm->def) < 0) goto cleanup; @@ -9192,7 +9231,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, autostart = (autostart != 0); if (vm->autostart != autostart) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(configFile = virDomainConfigFile(cfg->configDir, vm->def->name))) @@ -9230,7 +9269,7 @@ static int qemuDomainSetAutostart(virDomainPtr dom, vm->autostart = autostart; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } ret = 0; @@ -9338,7 +9377,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9372,7 +9411,7 @@ qemuDomainSetBlkioParameters(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9514,7 +9553,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; /* QEMU and LXC implementation are identical */ @@ -9545,7 +9584,7 @@ qemuDomainSetMemoryParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -9768,7 +9807,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, } } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -9823,7 +9862,7 @@ qemuDomainSetNumaParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virBitmapFree(nodeset); @@ -9977,7 +10016,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, if (virDomainSetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10019,7 +10058,7 @@ qemuDomainSetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10048,17 +10087,17 @@ 
qemuDomainGetPerfEvents(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainGetPerfEventsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(def = virDomainObjGetOneDef(vm, flags))) goto endjob; - priv = vm->privateData; - for (i = 0; i < VIR_PERF_EVENT_LAST; i++) { bool perf_enabled; @@ -10080,7 +10119,7 @@ qemuDomainGetPerfEvents(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10254,7 +10293,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -10488,7 +10527,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainDefFree(persistentDefCopy); @@ -10782,7 +10821,7 @@ qemuDomainBlockResize(virDomainPtr dom, if (virDomainBlockResizeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -10827,7 +10866,7 @@ qemuDomainBlockResize(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -10972,14 +11011,17 @@ qemuDomainBlockStats(virDomainPtr dom, qemuBlockStatsPtr blockstats = NULL; int ret = -1; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -11002,7 +11044,7 @@ qemuDomainBlockStats(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -11022,6 +11064,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, qemuBlockStatsPtr blockstats = NULL; int nstats; int ret = -1; + qemuDomainObjPrivatePtr priv; VIR_DEBUG("params=%p, flags=0x%x", params, flags); @@ -11033,10 +11076,12 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainBlockStatsFlagsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -11089,7 +11134,7 @@ qemuDomainBlockStatsFlags(virDomainPtr dom, *nparams = nstats; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(blockstats); @@ -11152,6 +11197,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, bool inboundSpecified = false, outboundSpecified = false; int actualType; bool qosSupported = true; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); @@ -11177,11 +11223,12 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = 
vm->privateData; if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -11355,7 +11402,7 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virNetDevBandwidthFree(bandwidth); @@ -11518,6 +11565,7 @@ qemuDomainMemoryStats(virDomainPtr dom, unsigned int flags) { virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, -1); @@ -11525,15 +11573,17 @@ qemuDomainMemoryStats(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMemoryStatsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; ret = qemuDomainMemoryStatsInternal(vm, stats, nr_stats); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -11623,6 +11673,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainMemoryPeekEnsureACL(dom->conn, vm->def) < 0) goto cleanup; @@ -11633,7 +11684,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -11651,7 +11702,6 @@ qemuDomainMemoryPeek(virDomainPtr dom, qemuSecurityDomainSetPathLabel(driver, vm, tmp, false); - priv = vm->privateData; qemuDomainObjEnterMonitor(vm); if (flags == VIR_MEMORY_VIRTUAL) { if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) { @@ -11678,7 +11728,7 @@ qemuDomainMemoryPeek(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FORCE_CLOSE(fd); @@ -11898,6 +11948,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; int ret = -1; virDomainDiskDefPtr disk; g_autoptr(virQEMUDriverConfig) cfg = NULL; @@ -11909,11 +11960,12 @@ qemuDomainGetBlockInfo(virDomainPtr dom, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainGetBlockInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (!(disk = virDomainDiskByName(vm->def, path, false))) { @@ -11985,7 +12037,7 @@ qemuDomainGetBlockInfo(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(entry); virDomainObjEndAPI(&vm); @@ -13571,7 +13623,7 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -13607,7 +13659,7 @@ qemuDomainGetJobStatsInternal(virDomainObjPtr vm, ret = 0; cleanup: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -13701,7 +13753,7 @@ qemuDomainAbortJobMigration(virDomainObjPtr vm) VIR_DEBUG("Cancelling migration job at client request"); - 
qemuDomainObjAbortAsyncJob(vm); + qemuDomainObjAbortAsyncJob(vm, &priv->job); qemuDomainObjEnterMonitor(vm); ret = qemuMonitorMigrateCancel(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) @@ -13722,16 +13774,17 @@ static int qemuDomainAbortJob(virDomainPtr dom) if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainAbortJobEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_ABORT) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_ABORT) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; jobPriv = priv->job.privateData; switch (priv->job.asyncJob) { @@ -13794,7 +13847,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13818,17 +13871,17 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateSetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - VIR_DEBUG("Setting migration downtime to %llums", downtime); if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_DOWNTIME)) { @@ -13853,7 +13906,7 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13870,16 +13923,19 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, qemuMigrationParamsPtr migParams = NULL; int ret = -1; int rc; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -13905,7 +13961,7 @@ qemuDomainMigrateGetMaxDowntime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: qemuMigrationParamsFree(migParams); @@ -13930,17 +13986,17 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateGetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (!qemuMigrationCapsGet(vm, QEMU_MIGRATION_CAP_XBZRLE)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("Compressed migration is not supported by " @@ -13967,7 +14023,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -13990,17 +14046,17 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateSetCompressionCacheEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, 
&priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (!qemuMigrationCapsGet(vm, QEMU_MIGRATION_CAP_XBZRLE)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("Compressed migration is not supported by " @@ -14031,7 +14087,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14078,7 +14134,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -14121,7 +14177,7 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -14136,9 +14192,10 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, g_autoptr(qemuMigrationParams) migParams = NULL; unsigned long long bw; int rc; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -14175,7 +14232,7 @@ qemuDomainMigrationGetPostcopyBandwidth(virDomainObjPtr vm, ret = 0; cleanup: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -14228,17 +14285,17 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainMigrateStartPostCopyEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MIGRATION_OP) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MIGRATION_OP) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("post-copy can only be started while " @@ -14260,7 +14317,7 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -15430,16 +15487,17 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) { int freeze; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) { - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); goto cleanup; } freeze = qemuDomainSnapshotFSFreeze(vm, NULL, 0); - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); if (freeze < 0) { /* the helper reported the error */ @@ -15492,9 +15550,10 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* allow the migration job to be cancelled or the domain to be paused */ - qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MIGRATION_OP))); + qemuDomainObjSetAsyncJobMask(&priv->job, + (QEMU_JOB_DEFAULT_MASK | + JOB_MASK(QEMU_JOB_SUSPEND) | + JOB_MASK(QEMU_JOB_MIGRATION_OP))); if ((compressed = qemuGetCompressionProgram(cfg->snapshotImageFormat, &compressor, @@ -15522,7 +15581,8 @@ 
qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, memory_unlink = true; /* forbid any further manipulation */ - qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_DEFAULT_MASK); + qemuDomainObjSetAsyncJobMask(&priv->job, + QEMU_JOB_DEFAULT_MASK); } /* the domain is now paused if a memory snapshot was requested */ @@ -15573,7 +15633,8 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, } if (thaw != 0 && - qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) >= 0 && + qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) >= 0 && virDomainObjIsActive(vm)) { if (qemuDomainSnapshotFSThaw(vm, ret == 0 && thaw > 0) < 0) { /* helper reported the error, if it was needed */ @@ -15581,7 +15642,7 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver, ret = -1; } - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); } virQEMUSaveDataFree(data); @@ -15733,11 +15794,11 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain, * a regular job, so we need to set the job mask to disallow query as * 'savevm' blocks the monitor. External snapshot will then modify the * job mask appropriately. */ - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_SNAPSHOT, + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_SNAPSHOT, VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0) goto cleanup; - qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE); + qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE); if (redefine) { if (virDomainSnapshotRedefinePrep(vm, &def, &snap, @@ -15871,7 +15932,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain, virDomainSnapshotObjListRemove(vm->snapshots, snap); } - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjEndAsyncJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -16762,6 +16823,7 @@ qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot, bool metadata_only = !!(flags & VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY); int external = 0; g_autoptr(virQEMUDriverConfig) cfg = NULL; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN | VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY | @@ -16771,11 +16833,12 @@ qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if (virDomainSnapshotDeleteEnsureACL(snapshot->domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot))) @@ -16848,7 +16911,7 @@ qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17098,17 +17161,17 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, if (!(vm = qemuDomainObjFromDomain(domain))) goto cleanup; + priv = vm->privateData; + if (virDomainQemuMonitorCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_CUSTOM_MONITOR, NULL); hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP); @@ -17119,7 +17182,7 @@ static int qemuDomainQemuMonitorCommand(virDomainPtr domain, const char *cmd, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: 
virDomainObjEndAPI(&vm); @@ -17437,7 +17500,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, goto cleanup; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17533,7 +17596,7 @@ qemuDomainBlockPullCommon(virDomainObjPtr vm, qemuBlockJobStarted(job, vm); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: qemuBlockJobStartupFinalize(vm, job); @@ -17567,7 +17630,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17650,7 +17713,7 @@ qemuDomainBlockJobAbort(virDomainPtr dom, endjob: if (job && !async) qemuBlockJobSyncEnd(vm, job, QEMU_ASYNC_JOB_NONE); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17717,17 +17780,19 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, int ret = -1; qemuMonitorBlockJobInfo rawInfo; g_autoptr(qemuBlockJobData) job = NULL; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_BLOCK_JOB_INFO_BANDWIDTH_BYTES, -1); if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainGetBlockJobInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17755,7 +17820,7 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, } endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17774,6 +17839,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, virDomainObjPtr vm; unsigned long long speed = bandwidth; g_autoptr(qemuBlockJobData) job = NULL; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_BLOCK_JOB_SPEED_BANDWIDTH_BYTES, -1); @@ -17791,10 +17857,12 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainBlockJobSetSpeedEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -17817,7 +17885,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -17996,7 +18064,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, return -1; } - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -18272,7 +18340,7 @@ qemuDomainBlockCopyCommon(virDomainObjPtr vm, if (need_unlink && virStorageFileUnlink(mirror) < 0) VIR_WARN("%s", _("unable to remove just-created copy target")); virStorageFileDeinit(mirror); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); qemuBlockJobStartupFinalize(vm, job); return ret; @@ -18496,7 +18564,7 @@ qemuDomainBlockCommit(virDomainPtr dom, if (virDomainBlockCommitEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) 
< 0) @@ -18712,7 +18780,7 @@ qemuDomainBlockCommit(virDomainPtr dom, virErrorRestore(&orig_err); } qemuBlockJobStartupFinalize(vm, job); - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -18736,17 +18804,17 @@ qemuDomainOpenGraphics(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return -1; + priv = vm->privateData; + if (virDomainOpenGraphicsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - if (idx >= vm->def->ngraphics) { virReportError(VIR_ERR_INTERNAL_ERROR, _("No graphics backend with index %d"), idx); @@ -18784,7 +18852,7 @@ qemuDomainOpenGraphics(virDomainPtr dom, ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -18852,14 +18920,14 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom, if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; qemuDomainObjEnterMonitor(vm); ret = qemuMonitorOpenGraphics(priv->mon, protocol, pair[1], "graphicsfd", (flags & VIR_DOMAIN_OPEN_GRAPHICS_SKIPAUTH)); if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); if (ret < 0) goto cleanup; @@ -19097,12 +19165,11 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, goto cleanup; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; - priv = vm->privateData; - if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) goto endjob; @@ -19364,7 +19431,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(info.group_name); @@ -19408,7 +19475,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, if (virDomainGetBlockIoTuneEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; /* the API check guarantees that only one of the definitions will be set */ @@ -19521,7 +19588,7 @@ qemuDomainGetBlockIoTune(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: VIR_FREE(reply.group_name); @@ -19554,7 +19621,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, if (virDomainGetDiskErrorsEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19595,7 +19662,7 @@ qemuDomainGetDiskErrors(virDomainPtr dom, ret = n; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19617,6 +19684,7 @@ qemuDomainSetMetadata(virDomainPtr dom, { virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; g_autoptr(virQEMUDriverConfig) cfg = NULL; int ret = -1; @@ -19627,11 +19695,12 @@ qemuDomainSetMetadata(virDomainPtr dom, return -1; cfg = virQEMUDriverGetConfig(driver); + priv = vm->privateData; if 
(virDomainSetMetadataEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; ret = virDomainObjSetMetadata(vm, type, metadata, key, uri, @@ -19644,7 +19713,7 @@ qemuDomainSetMetadata(virDomainPtr dom, virObjectEventStateQueue(driver->domainEventState, ev); } - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19753,7 +19822,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CURRENT_MACHINE)) return -1; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -19762,7 +19831,7 @@ qemuDomainQueryWakeupSuspendSupport(virDomainObjPtr vm, ret = qemuDomainProbeQMPCurrentMachine(vm, wakeupSupported); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -19772,9 +19841,11 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm, unsigned int target) { qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) return -1; if ((ret = virDomainObjCheckActive(vm)) < 0) @@ -19788,7 +19859,7 @@ qemuDomainPMSuspendAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -19878,24 +19949,24 @@ qemuDomainPMWakeup(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainPMWakeupEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) goto endjob; - priv = vm->privateData; - qemuDomainObjEnterMonitor(vm); ret = qemuMonitorSystemWakeup(priv->mon); if (qemuDomainObjExitMonitor(vm) < 0) ret = -1; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -19929,16 +20000,20 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, int ret = -1; char *result = NULL; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, NULL); if (!(vm = qemuDomainObjFromDomain(domain))) goto cleanup; + priv = vm->privateData; + if (virDomainQemuAgentCommandEnsureACL(domain->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -19956,7 +20031,7 @@ qemuDomainQemuAgentCommand(virDomainPtr domain, VIR_FREE(result); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -20015,6 +20090,7 @@ qemuDomainFSTrim(virDomainPtr dom, { virDomainObjPtr vm; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, -1); @@ -20029,10 +20105,13 @@ qemuDomainFSTrim(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainFSTrimEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + 
QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -20046,7 +20125,7 @@ qemuDomainFSTrim(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -20200,9 +20279,11 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm, char **hostname) { qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -20217,7 +20298,7 @@ qemuDomainGetHostnameAgent(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -20232,8 +20313,9 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm, int n_leases; size_t i, j; int ret = -1; + qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -20275,7 +20357,7 @@ qemuDomainGetHostnameLease(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -20331,6 +20413,7 @@ qemuDomainGetTime(virDomainPtr dom, unsigned int flags) { virDomainObjPtr vm = NULL; + qemuDomainObjPrivatePtr priv; qemuAgentPtr agent; int ret = -1; int rv; @@ -20340,10 +20423,13 @@ qemuDomainGetTime(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return ret; + priv = vm->privateData; + if (virDomainGetTimeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20362,7 +20448,7 @@ qemuDomainGetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -20377,9 +20463,11 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm, bool rtcSync) { qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) return -1; if (virDomainObjCheckActive(vm) < 0) @@ -20393,7 +20481,7 @@ qemuDomainSetTimeAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -20435,7 +20523,7 @@ qemuDomainSetTime(virDomainPtr dom, if (qemuDomainSetTimeAgent(vm, seconds, nseconds, rtcSync) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20455,7 +20543,7 @@ qemuDomainSetTime(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -20471,16 +20559,20 @@ qemuDomainFSFreeze(virDomainPtr dom, { virDomainObjPtr vm; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainFSFreezeEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if 
(qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20489,7 +20581,7 @@ qemuDomainFSFreeze(virDomainPtr dom, ret = qemuDomainSnapshotFSFreeze(vm, mountpoints, nmountpoints); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -20505,6 +20597,7 @@ qemuDomainFSThaw(virDomainPtr dom, { virDomainObjPtr vm; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); @@ -20517,10 +20610,13 @@ qemuDomainFSThaw(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainFSThawEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -20529,7 +20625,7 @@ qemuDomainFSThaw(virDomainPtr dom, ret = qemuDomainSnapshotFSThaw(vm, true); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -21717,6 +21813,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, virErrorPtr orig_err = NULL; virDomainObjPtr *vms = NULL; virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; size_t nvms; virDomainStatsRecordPtr *tmpstats = NULL; bool enforce = !!(flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS); @@ -21764,6 +21861,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, virDomainStatsRecordPtr tmp = NULL; domflags = 0; vm = vms[i]; + priv = vm->privateData; virObjectLock(vm); @@ -21771,9 +21869,9 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, int rv; if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_NOWAIT) - rv = qemuDomainObjBeginJobNowait(vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJobNowait(vm, &priv->job, QEMU_JOB_QUERY); else - rv = qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY); + rv = qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY); if (rv == 0) domflags |= QEMU_DOMAIN_STATS_HAVE_JOB; @@ -21784,7 +21882,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, domflags |= QEMU_DOMAIN_STATS_BACKING; if (qemuDomainGetStats(conn, vm, stats, &tmp, domflags) < 0) { if (HAVE_JOB(domflags) && vm) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); virObjectUnlock(vm); goto cleanup; @@ -21794,7 +21892,7 @@ qemuConnectGetAllDomainStats(virConnectPtr conn, tmpstats[nstats++] = tmp; if (HAVE_JOB(domflags)) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); virObjectUnlock(vm); } @@ -21840,8 +21938,10 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm, { int ret = -1; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv = vm->privateData; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) return ret; if (virDomainObjCheckActive(vm) < 0) @@ -21855,7 +21955,7 @@ qemuDomainGetFSInfoAgent(virDomainObjPtr vm, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); return ret; } @@ -21939,19 +22039,22 @@ qemuDomainGetFSInfo(virDomainPtr dom, qemuAgentFSInfoPtr *agentinfo = NULL; int ret = -1; int nfs; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, ret); if (!(vm = qemuDomainObjFromDomain(dom))) return ret; + priv = vm->privateData; + if (virDomainGetFSInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; if ((nfs = qemuDomainGetFSInfoAgent(vm, &agentinfo)) < 0) goto cleanup; - if 
(qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -21960,7 +22063,7 @@ qemuDomainGetFSInfo(virDomainPtr dom, ret = virDomainFSInfoFormat(agentinfo, nfs, vm->def, info); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: g_free(agentinfo); @@ -21977,6 +22080,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, { virDomainObjPtr vm = NULL; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, -1); @@ -21984,6 +22088,8 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainInterfaceAddressesEnsureACL(dom->conn, vm->def) < 0) goto cleanup; @@ -21996,7 +22102,8 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, break; case VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT: - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -22007,7 +22114,7 @@ qemuDomainInterfaceAddresses(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); break; @@ -22036,6 +22143,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, { virDomainObjPtr vm; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ret = -1; int rv; @@ -22044,10 +22152,13 @@ qemuDomainSetUserPassword(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) return ret; + priv = vm->privateData; + if (virDomainSetUserPasswordEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -22067,7 +22178,7 @@ qemuDomainSetUserPassword(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -22190,6 +22301,7 @@ static int qemuDomainRename(virDomainPtr dom, virQEMUDriverPtr driver = dom->conn->privateData; virDomainObjPtr vm = NULL; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, ret); @@ -22199,7 +22311,9 @@ static int qemuDomainRename(virDomainPtr dom, if (virDomainRenameEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + priv = vm->privateData; + + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjIsActive(vm)) { @@ -22246,7 +22360,7 @@ static int qemuDomainRename(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -22326,6 +22440,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, qemuAgentPtr agent; qemuAgentCPUInfoPtr info = NULL; int ninfo = 0; + qemuDomainObjPrivatePtr priv; int ret = -1; virCheckFlags(0, ret); @@ -22333,10 +22448,13 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainGetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, + QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -22355,7 +22473,7 @@ qemuDomainGetGuestVcpus(virDomainPtr dom, ret = 0; 
endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: VIR_FREE(info); @@ -22374,6 +22492,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, virBitmapPtr map = NULL; qemuAgentCPUInfoPtr info = NULL; qemuAgentPtr agent; + qemuDomainObjPrivatePtr priv; int ninfo = 0; size_t i; int ret = -1; @@ -22391,10 +22510,12 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainSetGuestVcpusEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_MODIFY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_MODIFY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -22440,7 +22561,7 @@ qemuDomainSetGuestVcpus(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); cleanup: VIR_FREE(info); @@ -22463,6 +22584,7 @@ qemuDomainSetVcpu(virDomainPtr dom, virBitmapPtr map = NULL; ssize_t lastvcpu; int ret = -1; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG, -1); @@ -22484,10 +22606,12 @@ qemuDomainSetVcpu(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainSetVcpuEnsureACL(dom->conn, vm->def, flags) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -22514,7 +22638,7 @@ qemuDomainSetVcpu(virDomainPtr dom, ret = qemuDomainSetVcpuInternal(driver, vm, def, persistentDef, map, !!state); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virBitmapFree(map); @@ -22546,7 +22670,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, if (virDomainSetBlockThresholdEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -22583,7 +22707,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -22641,7 +22765,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, if (virDomainSetLifecycleActionEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0) @@ -22672,7 +22796,7 @@ qemuDomainSetLifecycleAction(virDomainPtr dom, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virDomainObjEndAPI(&vm); @@ -22761,10 +22885,11 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm, int ret = -1; g_autofree char *tmp = NULL; int maxpar = 0; + qemuDomainObjPrivatePtr priv = vm->privateData; virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) return -1; qemuDomainObjEnterMonitor(vm); @@ -22784,7 +22909,7 @@ qemuDomainGetSEVMeasurement(virDomainObjPtr vm, ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); return ret; } @@ -22952,6 +23077,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, size_t nfs = 0; qemuAgentFSInfoPtr *agentfsinfo = 
NULL; size_t i; + qemuDomainObjPrivatePtr priv; virCheckFlags(0, -1); @@ -22961,10 +23087,12 @@ qemuDomainGetGuestInfo(virDomainPtr dom, if (!(vm = qemuDomainObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (virDomainGetGuestInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainObjBeginAgentJob(vm, QEMU_AGENT_JOB_QUERY) < 0) + if (qemuDomainObjBeginAgentJob(vm, &priv->job, QEMU_AGENT_JOB_QUERY) < 0) goto cleanup; if (!qemuDomainAgentAvailable(vm, true)) @@ -23012,10 +23140,10 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuDomainObjExitAgent(vm, agent); endagentjob: - qemuDomainObjEndAgentJob(vm); + qemuDomainObjEndAgentJob(vm, &priv->job); if (nfs > 0) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_QUERY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_QUERY) < 0) goto cleanup; if (virDomainObjCheckActive(vm) < 0) @@ -23026,7 +23154,7 @@ qemuDomainGetGuestInfo(virDomainPtr dom, qemuAgentFSInfoFormatParams(agentfsinfo, nfs, vm->def, params, nparams, &maxparams); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } cleanup: diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 601c11221d..157e023694 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -2005,7 +2005,7 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, switch ((qemuMigrationJobPhase) priv->job.phase) { case QEMU_MIGRATION_PHASE_BEGIN3: /* just forget we were about to migrate */ - qemuDomainObjDiscardAsyncJob(vm); + qemuDomainObjDiscardAsyncJob(vm, &priv->job); break; case QEMU_MIGRATION_PHASE_PERFORM3_DONE: @@ -2015,7 +2015,7 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, qemuMigrationParamsReset(vm, QEMU_ASYNC_JOB_MIGRATION_OUT, jobPriv->migParams, priv->job.apiFlags); /* clear the job and let higher levels decide what to do */ - qemuDomainObjDiscardAsyncJob(vm); + qemuDomainObjDiscardAsyncJob(vm, &priv->job); break; case QEMU_MIGRATION_PHASE_PERFORM3: @@ -2204,6 +2204,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, unsigned long flags) { virQEMUDriverPtr driver = conn->privateData; + qemuDomainObjPrivatePtr priv = vm->privateData; char *xml = NULL; qemuDomainAsyncJob asyncJob; @@ -2213,7 +2214,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, goto cleanup; asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT; } else { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; asyncJob = QEMU_ASYNC_JOB_NONE; } @@ -2258,7 +2259,7 @@ qemuMigrationSrcBegin(virConnectPtr conn, if (flags & VIR_MIGRATE_CHANGE_PROTECTION) qemuMigrationJobFinish(vm); else - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); goto cleanup; } @@ -2283,7 +2284,7 @@ qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver, if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) return; - qemuDomainObjDiscardAsyncJob(vm); + qemuDomainObjDiscardAsyncJob(vm, &priv->job); } static qemuProcessIncomingDefPtr @@ -5452,12 +5453,12 @@ qemuMigrationJobStart(virDomainObjPtr vm, JOB_MASK(QEMU_JOB_MIGRATION_OP); } - if (qemuDomainObjBeginAsyncJob(vm, job, op, apiFlags) < 0) + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, job, op, apiFlags) < 0) return -1; jobPriv->current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; - qemuDomainObjSetAsyncJobMask(vm, mask); + qemuDomainObjSetAsyncJobMask(&priv->job, mask); return 0; } @@ -5474,7 +5475,7 @@ qemuMigrationJobSetPhase(virDomainObjPtr vm, return; } - qemuDomainObjSetJobPhase(vm, phase); + qemuDomainObjSetJobPhase(vm, &priv->job, phase); } 
static void @@ -5487,7 +5488,8 @@ qemuMigrationJobStartPhase(virDomainObjPtr vm, static void qemuMigrationJobContinue(virDomainObjPtr vm) { - qemuDomainObjReleaseAsyncJob(vm); + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainObjReleaseAsyncJob(&priv->job); } static bool @@ -5513,7 +5515,8 @@ qemuMigrationJobIsActive(virDomainObjPtr vm, static void qemuMigrationJobFinish(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainObjEndAsyncJob(vm, &priv->job); } diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 7e4f5e2cfc..ed34956a97 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -422,7 +422,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, if (vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY || vm->def->onReboot == VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE) { - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -436,7 +436,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainAuditStop(vm, "destroyed"); qemuDomainRemoveInactive(driver, vm); endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); } ret = 0; @@ -467,7 +467,7 @@ qemuProcessFakeReboot(void *opaque) VIR_DEBUG("vm=%p", vm); virObjectLock(vm); - if (qemuDomainObjBeginJob(vm, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, QEMU_JOB_MODIFY) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -505,7 +505,7 @@ qemuProcessFakeReboot(void *opaque) ret = 0; endjob: - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: priv->pausedShutdown = false; @@ -3643,9 +3643,10 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver, jobObj->asyncOwnerAPI = virThreadJobGet(); jobObj->asyncStarted = now; - qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | - JOB_MASK(QEMU_JOB_SUSPEND) | - JOB_MASK(QEMU_JOB_MODIFY))); + qemuDomainObjSetAsyncJobMask(&priv->job, + (QEMU_JOB_DEFAULT_MASK | + JOB_MASK(QEMU_JOB_SUSPEND) | + JOB_MASK(QEMU_JOB_MODIFY))); /* We reset the job parameters for backup so that the job will look * active. 
This is possible because we are able to recover the state @@ -4572,11 +4573,13 @@ qemuProcessBeginJob(virDomainObjPtr vm, virDomainJobOperation operation, unsigned long apiFlags) { - if (qemuDomainObjBeginAsyncJob(vm, QEMU_ASYNC_JOB_START, + qemuDomainObjPrivatePtr priv = vm->privateData; + + if (qemuDomainObjBeginAsyncJob(vm, &priv->job, QEMU_ASYNC_JOB_START, operation, apiFlags) < 0) return -1; - qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE); + qemuDomainObjSetAsyncJobMask(&priv->job, QEMU_JOB_NONE); return 0; } @@ -4584,7 +4587,8 @@ qemuProcessBeginJob(virDomainObjPtr vm, void qemuProcessEndJob(virDomainObjPtr vm) { - qemuDomainObjEndAsyncJob(vm); + qemuDomainObjPrivatePtr priv = vm->privateData; + qemuDomainObjEndAsyncJob(vm, &priv->job); } @@ -7297,7 +7301,7 @@ qemuProcessBeginStopJob(virDomainObjPtr vm, /* Wake up anything waiting on domain condition */ virDomainObjBroadcast(vm); - if (qemuDomainObjBeginJob(vm, job) < 0) + if (qemuDomainObjBeginJob(vm, &priv->job, job) < 0) goto cleanup; ret = 0; @@ -7338,7 +7342,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, virErrorPreserveLast(&orig_err); if (asyncJob != QEMU_ASYNC_JOB_NONE) { - if (qemuDomainObjBeginNestedJob(vm, asyncJob) < 0) + if (qemuDomainObjBeginNestedJob(vm, &priv->job, asyncJob) < 0) goto cleanup; } else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE && priv->job.asyncOwner == virThreadSelfID() && @@ -7644,7 +7648,7 @@ void qemuProcessStop(virQEMUDriverPtr driver, endjob: if (asyncJob != QEMU_ASYNC_JOB_NONE) - qemuDomainObjEndJob(vm); + qemuDomainObjEndJob(vm, &priv->job); cleanup: virErrorRestore(&orig_err); @@ -7669,7 +7673,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, if (priv->job.asyncJob) { VIR_DEBUG("vm=%s has long-term job active, cancelling", dom->def->name); - qemuDomainObjDiscardAsyncJob(dom); + qemuDomainObjDiscardAsyncJob(dom, &priv->job); } VIR_DEBUG("Killing domain"); @@ -7687,7 +7691,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, qemuDomainRemoveInactive(driver, dom); - qemuDomainObjEndJob(dom); + qemuDomainObjEndJob(dom, &priv->job); virObjectEventStateQueue(driver->domainEventState, event); } @@ -8026,14 +8030,14 @@ qemuProcessReconnect(void *opaque) g_clear_object(&data->identity); VIR_FREE(data); - qemuDomainObjRestoreJob(obj, &oldjob); + priv = obj->privateData; + qemuDomainObjRestoreJob(&priv->job, &oldjob); if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; cfg = virQEMUDriverGetConfig(driver); - priv = obj->privateData; - if (qemuDomainObjBeginJob(obj, QEMU_JOB_MODIFY) < 0) + if (qemuDomainObjBeginJob(obj, &priv->job, QEMU_JOB_MODIFY) < 0) goto error; jobStarted = true; @@ -8260,7 +8264,7 @@ qemuProcessReconnect(void *opaque) if (jobStarted) { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactive(driver, obj); - qemuDomainObjEndJob(obj); + qemuDomainObjEndJob(obj, &priv->job); } else { if (!virDomainObjIsActive(obj)) qemuDomainRemoveInactiveJob(driver, obj); -- 2.25.1
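For readers following the API change above: after this series, every job helper receives the job object explicitly instead of fetching it from the qemu-specific private data. Below is a minimal, self-contained sketch of that calling convention. The struct and function names are simplified stand-ins, not libvirt's actual definitions; only the shape of the call mirrors `qemuDomainObjBeginJob(vm, &priv->job, ...)` / `qemuDomainObjEndJob(vm, &priv->job)`.

```c
/* Hypothetical, simplified stand-ins for virDomainObj and the job helpers. */
#include <stdio.h>

typedef struct {
    int active;              /* currently running job type, 0 == none */
} DomainJobObj;

typedef struct {
    DomainJobObj job;        /* job state kept in a generic job object */
} DomainPrivateData;

typedef struct {
    DomainPrivateData *privateData;
    const char *name;
} DomainObj;

/* The helper only touches the job object it is handed, so it no longer
 * needs to know anything hypervisor-specific about privateData. */
static int
domainObjBeginJob(DomainObj *vm, DomainJobObj *job, int jobType)
{
    if (job->active) {
        fprintf(stderr, "domain %s already has an active job\n", vm->name);
        return -1;
    }
    job->active = jobType;
    return 0;
}

static void
domainObjEndJob(DomainObj *vm, DomainJobObj *job)
{
    (void)vm;
    job->active = 0;
}

int
main(void)
{
    DomainPrivateData priv = { .job = { 0 } };
    DomainObj vm = { .privateData = &priv, .name = "demo" };

    /* Callers now pass &priv->job explicitly, as in the diffs above. */
    if (domainObjBeginJob(&vm, &priv.job, 1) == 0) {
        /* ... do work under the job ... */
        domainObjEndJob(&vm, &priv.job);
    }
    return 0;
}
```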

On Tue, Aug 04, 2020 at 08:06:48PM +0530, Prathamesh Chavan wrote:
> References to `qemuDomainObjPrivatePtr` in qemu_domainjob were removed,
> as it is a qemu-hypervisor-specific pointer.
>
> Signed-off-by: Prathamesh Chavan <pc44800@gmail.com>
> ---

This patch looks good.

Erik

`qemuMigrationJobPhase` was transformed into `virMigrationJobPhase` and a common util file `virmigration` was created to store its defination. Signed-off-by: Prathamesh Chavan <pc44800@gmail.com> --- src/hypervisor/meson.build | 1 + src/hypervisor/virmigration.c | 41 ++++++++++++++++++++ src/hypervisor/virmigration.h | 38 +++++++++++++++++++ src/libvirt_private.syms | 4 ++ src/qemu/MIGRATION.txt | 8 ++-- src/qemu/qemu_domainjob.c | 4 +- src/qemu/qemu_migration.c | 70 +++++++++++++++++------------------ src/qemu/qemu_migration.h | 17 +-------- src/qemu/qemu_process.c | 48 ++++++++++++------------ 9 files changed, 150 insertions(+), 81 deletions(-) create mode 100644 src/hypervisor/virmigration.c create mode 100644 src/hypervisor/virmigration.h diff --git a/src/hypervisor/meson.build b/src/hypervisor/meson.build index 85149c683e..c81bdfa2fc 100644 --- a/src/hypervisor/meson.build +++ b/src/hypervisor/meson.build @@ -3,6 +3,7 @@ hypervisor_sources = [ 'domain_driver.c', 'virclosecallbacks.c', 'virhostdev.c', + 'virmigration.c', ] hypervisor_lib = static_library( diff --git a/src/hypervisor/virmigration.c b/src/hypervisor/virmigration.c new file mode 100644 index 0000000000..2cad5a6b1b --- /dev/null +++ b/src/hypervisor/virmigration.c @@ -0,0 +1,41 @@ +/* + * virmigration.c: hypervisor migration handling + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. If not, see + * <http://www.gnu.org/licenses/>. + */ + +#include <config.h> + +#include "virmigration.h" +#include "domain_driver.h" +#include "virlog.h" + +#define VIR_FROM_THIS VIR_FROM_DOMAIN + +VIR_LOG_INIT("util.migration"); + +VIR_ENUM_IMPL(virMigrationJobPhase, + VIR_MIGRATION_PHASE_LAST, + "none", + "perform2", + "begin3", + "perform3", + "perform3_done", + "confirm3_cancelled", + "confirm3", + "prepare", + "finish2", + "finish3", +); diff --git a/src/hypervisor/virmigration.h b/src/hypervisor/virmigration.h new file mode 100644 index 0000000000..e03d71c1bb --- /dev/null +++ b/src/hypervisor/virmigration.h @@ -0,0 +1,38 @@ +/* + * virmigration.h: hypervisor migration handling + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. If not, see + * <http://www.gnu.org/licenses/>. 
+ */ + +#pragma once + +#include "virenum.h" + + +typedef enum { + VIR_MIGRATION_PHASE_NONE = 0, + VIR_MIGRATION_PHASE_PERFORM2, + VIR_MIGRATION_PHASE_BEGIN3, + VIR_MIGRATION_PHASE_PERFORM3, + VIR_MIGRATION_PHASE_PERFORM3_DONE, + VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED, + VIR_MIGRATION_PHASE_CONFIRM3, + VIR_MIGRATION_PHASE_PREPARE, + VIR_MIGRATION_PHASE_FINISH2, + VIR_MIGRATION_PHASE_FINISH3, + + VIR_MIGRATION_PHASE_LAST +} virMigrationJobPhase; +VIR_ENUM_DECL(virMigrationJobPhase); diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index 01c2e710cd..cf78c2f27a 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -1474,6 +1474,10 @@ virHostdevUpdateActiveSCSIDevices; virHostdevUpdateActiveUSBDevices; +# hypervisor/virmigration.h +virMigrationJobPhaseTypeFromString; +virMigrationJobPhaseTypeToString; + # libvirt_internal.h virConnectSupportsFeature; virDomainMigrateBegin3; diff --git a/src/qemu/MIGRATION.txt b/src/qemu/MIGRATION.txt index e861fd001e..dd044c6064 100644 --- a/src/qemu/MIGRATION.txt +++ b/src/qemu/MIGRATION.txt @@ -74,7 +74,7 @@ The sequence of calling qemuMigrationJob* helper methods is as follows: migration type and version) has to start migration job and keep it active: qemuMigrationJobStart(driver, vm, QEMU_JOB_MIGRATION_{IN,OUT}); - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*); + qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_*); ...do work... qemuMigrationJobContinue(vm); @@ -82,7 +82,7 @@ The sequence of calling qemuMigrationJob* helper methods is as follows: if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT})) return; - qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*); + qemuMigrationJobStartPhase(driver, vm, VIR_MIGRATION_PHASE_*); ...do work... qemuMigrationJobContinue(vm); @@ -90,11 +90,11 @@ The sequence of calling qemuMigrationJob* helper methods is as follows: if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT})) return; - qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*); + qemuMigrationJobStartPhase(driver, vm, VIR_MIGRATION_PHASE_*); ...do work... 
qemuMigrationJobFinish(driver, vm); While migration job is running (i.e., after qemuMigrationJobStart* but before qemuMigrationJob{Continue,Finish}), migration phase can be advanced using - qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*); + qemuMigrationJobSetPhase(driver, vm, VIR_MIGRATION_PHASE_*); diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index ccbb7866b3..5b51aa5416 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -69,7 +69,7 @@ qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, switch (job) { case QEMU_ASYNC_JOB_MIGRATION_OUT: case QEMU_ASYNC_JOB_MIGRATION_IN: - return qemuMigrationJobPhaseTypeToString(phase); + return virMigrationJobPhaseTypeToString(phase); case QEMU_ASYNC_JOB_SAVE: case QEMU_ASYNC_JOB_DUMP: @@ -95,7 +95,7 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, switch (job) { case QEMU_ASYNC_JOB_MIGRATION_OUT: case QEMU_ASYNC_JOB_MIGRATION_IN: - return qemuMigrationJobPhaseTypeFromString(phase); + return virMigrationJobPhaseTypeFromString(phase); case QEMU_ASYNC_JOB_SAVE: case QEMU_ASYNC_JOB_DUMP: diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 157e023694..38676cc5fa 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -67,8 +67,8 @@ VIR_LOG_INIT("qemu.qemu_migration"); -VIR_ENUM_IMPL(qemuMigrationJobPhase, - QEMU_MIGRATION_PHASE_LAST, +VIR_ENUM_IMPL(virMigrationJobPhase, + VIR_MIGRATION_PHASE_LAST, "none", "perform2", "begin3", @@ -89,12 +89,12 @@ qemuMigrationJobStart(virDomainObjPtr vm, static void qemuMigrationJobSetPhase(virDomainObjPtr vm, - qemuMigrationJobPhase phase) + virMigrationJobPhase phase) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); static void qemuMigrationJobStartPhase(virDomainObjPtr vm, - qemuMigrationJobPhase phase) + virMigrationJobPhase phase) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); static void @@ -2002,13 +2002,13 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, " was closed; canceling the migration", vm->def->name); - switch ((qemuMigrationJobPhase) priv->job.phase) { - case QEMU_MIGRATION_PHASE_BEGIN3: + switch ((virMigrationJobPhase) priv->job.phase) { + case VIR_MIGRATION_PHASE_BEGIN3: /* just forget we were about to migrate */ qemuDomainObjDiscardAsyncJob(vm, &priv->job); break; - case QEMU_MIGRATION_PHASE_PERFORM3_DONE: + case VIR_MIGRATION_PHASE_PERFORM3_DONE: VIR_WARN("Migration of domain %s finished but we don't know if the" " domain was successfully started on destination or not", vm->def->name); @@ -2018,19 +2018,19 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm, qemuDomainObjDiscardAsyncJob(vm, &priv->job); break; - case QEMU_MIGRATION_PHASE_PERFORM3: + case VIR_MIGRATION_PHASE_PERFORM3: /* cannot be seen without an active migration API; unreachable */ - case QEMU_MIGRATION_PHASE_CONFIRM3: - case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED: + case VIR_MIGRATION_PHASE_CONFIRM3: + case VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED: /* all done; unreachable */ - case QEMU_MIGRATION_PHASE_PREPARE: - case QEMU_MIGRATION_PHASE_FINISH2: - case QEMU_MIGRATION_PHASE_FINISH3: + case VIR_MIGRATION_PHASE_PREPARE: + case VIR_MIGRATION_PHASE_FINISH2: + case VIR_MIGRATION_PHASE_FINISH3: /* incoming migration; unreachable */ - case QEMU_MIGRATION_PHASE_PERFORM2: + case VIR_MIGRATION_PHASE_PERFORM2: /* single phase outgoing migration; unreachable */ - case QEMU_MIGRATION_PHASE_NONE: - case QEMU_MIGRATION_PHASE_LAST: + case VIR_MIGRATION_PHASE_NONE: + case VIR_MIGRATION_PHASE_LAST: /* unreachable */ ; } @@ -2066,7 +2066,7 @@ 
qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver, * change protection. */ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_BEGIN3); if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags)) return NULL; @@ -2525,7 +2525,7 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver, if (qemuMigrationJobStart(vm, QEMU_ASYNC_JOB_MIGRATION_IN, flags) < 0) goto cleanup; - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PREPARE); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PREPARE); /* Domain starts inactive, even if the domain XML had an id field. */ vm->def->id = -1; @@ -2987,8 +2987,8 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver, virCheckFlags(QEMU_MIGRATION_FLAGS, -1); qemuMigrationJobSetPhase(vm, retcode == 0 - ? QEMU_MIGRATION_PHASE_CONFIRM3 - : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED); + ? VIR_MIGRATION_PHASE_CONFIRM3 + : VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED); if (!(mig = qemuMigrationEatCookie(driver, vm->def, priv->origname, priv, cookiein, cookieinlen, @@ -3077,7 +3077,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, unsigned int flags, int cancelled) { - qemuMigrationJobPhase phase; + virMigrationJobPhase phase; virQEMUDriverConfigPtr cfg = NULL; int ret = -1; @@ -3087,9 +3087,9 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver, goto cleanup; if (cancelled) - phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED; + phase = VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED; else - phase = QEMU_MIGRATION_PHASE_CONFIRM3; + phase = VIR_MIGRATION_PHASE_CONFIRM3; qemuMigrationJobStartPhase(vm, phase); virCloseCallbacksUnset(driver->closeCallbacks, vm, @@ -4031,7 +4031,7 @@ qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver, * until the migration is complete. */ VIR_DEBUG("Perform %p", sconn); - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM2); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM2); if (flags & VIR_MIGRATE_TUNNELLED) ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL, NULL, 0, NULL, NULL, @@ -4269,7 +4269,7 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver, * confirm migration completion. 
*/ VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri)); - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM3); VIR_FREE(cookiein); cookiein = g_steal_pointer(&cookieout); cookieinlen = cookieoutlen; @@ -4294,7 +4294,7 @@ qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver, if (ret < 0) { virErrorPreserveLast(&orig_err); } else { - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM3_DONE); } /* If Perform returns < 0, then we need to cancel the VM @@ -4657,7 +4657,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver, migParams, flags, dname, resource, &v3proto); } else { - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM2); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM2); ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen, cookieout, cookieoutlen, flags, resource, NULL, NULL, 0, NULL, @@ -4742,7 +4742,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, return ret; } - qemuMigrationJobStartPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3); + qemuMigrationJobStartPhase(vm, VIR_MIGRATION_PHASE_PERFORM3); virCloseCallbacksUnset(driver->closeCallbacks, vm, qemuMigrationSrcCleanup); @@ -4756,7 +4756,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver, goto endjob; } - qemuMigrationJobSetPhase(vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE); + qemuMigrationJobSetPhase(vm, VIR_MIGRATION_PHASE_PERFORM3_DONE); if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn, qemuMigrationSrcCleanup) < 0) @@ -4989,8 +4989,8 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver, ignore_value(virTimeMillisNow(&timeReceived)); qemuMigrationJobStartPhase(vm, - v3proto ? QEMU_MIGRATION_PHASE_FINISH3 - : QEMU_MIGRATION_PHASE_FINISH2); + v3proto ? 
VIR_MIGRATION_PHASE_FINISH3 + : VIR_MIGRATION_PHASE_FINISH2); qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup); g_clear_pointer(&jobPriv->completed, qemuDomainJobInfoFree); @@ -5464,14 +5464,14 @@ qemuMigrationJobStart(virDomainObjPtr vm, static void qemuMigrationJobSetPhase(virDomainObjPtr vm, - qemuMigrationJobPhase phase) + virMigrationJobPhase phase) { qemuDomainObjPrivatePtr priv = vm->privateData; if (phase < priv->job.phase) { VIR_ERROR(_("migration protocol going backwards %s => %s"), - qemuMigrationJobPhaseTypeToString(priv->job.phase), - qemuMigrationJobPhaseTypeToString(phase)); + virMigrationJobPhaseTypeToString(priv->job.phase), + virMigrationJobPhaseTypeToString(phase)); return; } @@ -5480,7 +5480,7 @@ qemuMigrationJobSetPhase(virDomainObjPtr vm, static void qemuMigrationJobStartPhase(virDomainObjPtr vm, - qemuMigrationJobPhase phase) + virMigrationJobPhase phase) { qemuMigrationJobSetPhase(vm, phase); } diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index e99351ef82..8f5e2d0f81 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -24,6 +24,7 @@ #include "qemu_conf.h" #include "qemu_domain.h" #include "qemu_migration_params.h" +#include "virmigration.h" #include "virenum.h" /* @@ -87,22 +88,6 @@ NULL -typedef enum { - QEMU_MIGRATION_PHASE_NONE = 0, - QEMU_MIGRATION_PHASE_PERFORM2, - QEMU_MIGRATION_PHASE_BEGIN3, - QEMU_MIGRATION_PHASE_PERFORM3, - QEMU_MIGRATION_PHASE_PERFORM3_DONE, - QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED, - QEMU_MIGRATION_PHASE_CONFIRM3, - QEMU_MIGRATION_PHASE_PREPARE, - QEMU_MIGRATION_PHASE_FINISH2, - QEMU_MIGRATION_PHASE_FINISH3, - - QEMU_MIGRATION_PHASE_LAST -} qemuMigrationJobPhase; -VIR_ENUM_DECL(qemuMigrationJobPhase); - char * qemuMigrationSrcBegin(virConnectPtr conn, virDomainObjPtr vm, diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index ed34956a97..34631e3464 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -3429,24 +3429,24 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, (state == VIR_DOMAIN_RUNNING && reason == VIR_DOMAIN_RUNNING_POSTCOPY); - switch ((qemuMigrationJobPhase) job->phase) { - case QEMU_MIGRATION_PHASE_NONE: - case QEMU_MIGRATION_PHASE_PERFORM2: - case QEMU_MIGRATION_PHASE_BEGIN3: - case QEMU_MIGRATION_PHASE_PERFORM3: - case QEMU_MIGRATION_PHASE_PERFORM3_DONE: - case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED: - case QEMU_MIGRATION_PHASE_CONFIRM3: - case QEMU_MIGRATION_PHASE_LAST: + switch ((virMigrationJobPhase) job->phase) { + case VIR_MIGRATION_PHASE_NONE: + case VIR_MIGRATION_PHASE_PERFORM2: + case VIR_MIGRATION_PHASE_BEGIN3: + case VIR_MIGRATION_PHASE_PERFORM3: + case VIR_MIGRATION_PHASE_PERFORM3_DONE: + case VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED: + case VIR_MIGRATION_PHASE_CONFIRM3: + case VIR_MIGRATION_PHASE_LAST: /* N/A for incoming migration */ break; - case QEMU_MIGRATION_PHASE_PREPARE: + case VIR_MIGRATION_PHASE_PREPARE: VIR_DEBUG("Killing unfinished incoming migration for domain %s", vm->def->name); return -1; - case QEMU_MIGRATION_PHASE_FINISH2: + case VIR_MIGRATION_PHASE_FINISH2: /* source domain is already killed so let's just resume the domain * and hope we are all set */ VIR_DEBUG("Incoming migration finished, resuming domain %s", @@ -3458,7 +3458,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver, } break; - case QEMU_MIGRATION_PHASE_FINISH3: + case VIR_MIGRATION_PHASE_FINISH3: /* migration finished, we started resuming the domain but didn't * confirm success or failure yet; killing it seems safest unless * we 
already started guest CPUs or we were in post-copy mode */ @@ -3490,22 +3490,22 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED); bool resume = false; - switch ((qemuMigrationJobPhase) job->phase) { - case QEMU_MIGRATION_PHASE_NONE: - case QEMU_MIGRATION_PHASE_PREPARE: - case QEMU_MIGRATION_PHASE_FINISH2: - case QEMU_MIGRATION_PHASE_FINISH3: - case QEMU_MIGRATION_PHASE_LAST: + switch ((virMigrationJobPhase) job->phase) { + case VIR_MIGRATION_PHASE_NONE: + case VIR_MIGRATION_PHASE_PREPARE: + case VIR_MIGRATION_PHASE_FINISH2: + case VIR_MIGRATION_PHASE_FINISH3: + case VIR_MIGRATION_PHASE_LAST: /* N/A for outgoing migration */ break; - case QEMU_MIGRATION_PHASE_BEGIN3: + case VIR_MIGRATION_PHASE_BEGIN3: /* nothing happened so far, just forget we were about to migrate the * domain */ break; - case QEMU_MIGRATION_PHASE_PERFORM2: - case QEMU_MIGRATION_PHASE_PERFORM3: + case VIR_MIGRATION_PHASE_PERFORM2: + case VIR_MIGRATION_PHASE_PERFORM3: /* migration is still in progress, let's cancel it and resume the * domain; however we can only do that before migration enters * post-copy mode @@ -3523,7 +3523,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } break; - case QEMU_MIGRATION_PHASE_PERFORM3_DONE: + case VIR_MIGRATION_PHASE_PERFORM3_DONE: /* migration finished but we didn't have a chance to get the result * of Finish3 step; third party needs to check what to do next; in * post-copy mode we can use PAUSED_POSTCOPY_FAILED state for this @@ -3532,7 +3532,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, qemuMigrationAnyPostcopyFailed(driver, vm); break; - case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED: + case VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED: /* Finish3 failed, we need to resume the domain, but once we enter * post-copy mode there's no way back, so let's just mark the domain * as broken in that case @@ -3546,7 +3546,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver, } break; - case QEMU_MIGRATION_PHASE_CONFIRM3: + case VIR_MIGRATION_PHASE_CONFIRM3: /* migration completed, we need to kill the domain here */ *stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; return -1; -- 2.25.1
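To make the intent of the moved enum clearer, here is a small, self-contained sketch of what the `VIR_ENUM_DECL`/`VIR_ENUM_IMPL` pair effectively provides for `virMigrationJobPhase`: a string table indexed by the enum plus to-string/from-string helpers. The code below is a simplified stand-in written without libvirt's macros (the helper names here are hypothetical); only the phase names and their order match the patch.

```c
#include <stdio.h>
#include <string.h>

typedef enum {
    VIR_MIGRATION_PHASE_NONE = 0,
    VIR_MIGRATION_PHASE_PERFORM2,
    VIR_MIGRATION_PHASE_BEGIN3,
    VIR_MIGRATION_PHASE_PERFORM3,
    VIR_MIGRATION_PHASE_PERFORM3_DONE,
    VIR_MIGRATION_PHASE_CONFIRM3_CANCELLED,
    VIR_MIGRATION_PHASE_CONFIRM3,
    VIR_MIGRATION_PHASE_PREPARE,
    VIR_MIGRATION_PHASE_FINISH2,
    VIR_MIGRATION_PHASE_FINISH3,

    VIR_MIGRATION_PHASE_LAST
} virMigrationJobPhase;

/* One string per enum value before _LAST, in declaration order. */
static const char *const phaseNames[VIR_MIGRATION_PHASE_LAST] = {
    "none", "perform2", "begin3", "perform3", "perform3_done",
    "confirm3_cancelled", "confirm3", "prepare", "finish2", "finish3",
};

/* Roughly the lookup virMigrationJobPhaseTypeToString() performs. */
static const char *
migrationPhaseToString(virMigrationJobPhase phase)
{
    if (phase < 0 || phase >= VIR_MIGRATION_PHASE_LAST)
        return NULL;
    return phaseNames[phase];
}

/* Roughly the lookup virMigrationJobPhaseTypeFromString() performs. */
static int
migrationPhaseFromString(const char *name)
{
    for (int i = 0; i < VIR_MIGRATION_PHASE_LAST; i++) {
        if (strcmp(phaseNames[i], name) == 0)
            return i;
    }
    return -1;
}

int
main(void)
{
    printf("%s\n", migrationPhaseToString(VIR_MIGRATION_PHASE_PERFORM3)); /* perform3 */
    printf("%d\n", migrationPhaseFromString("prepare"));                  /* 7 */
    return 0;
}
```

This is also why the job phase can be persisted in the domain status XML and recovered after a libvirtd restart: only the string name is stored, and the conversion pair maps it back to the enum value.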

On Tue, Aug 04, 2020 at 08:06:49PM +0530, Prathamesh Chavan wrote:
> `qemuMigrationJobPhase` was transformed into `virMigrationJobPhase` and
> a common util file `virmigration` was created to store its definition.
>
> Signed-off-by: Prathamesh Chavan <pc44800@gmail.com>
> ---

You basically just moved the QEMU migration enums, which as a patch on
its own doesn't make much sense; the other migration bits that can be
made hypervisor-agnostic would have to either follow or be part of the
same patch.

Erik

On Wed, Aug 12, 2020 at 5:42 PM Erik Skultety <eskultet@redhat.com> wrote:
> On Tue, Aug 04, 2020 at 08:06:49PM +0530, Prathamesh Chavan wrote:
> > `qemuMigrationJobPhase` was transformed into `virMigrationJobPhase` and
> > a common util file `virmigration` was created to store its definition.
> >
> > Signed-off-by: Prathamesh Chavan <pc44800@gmail.com>
> > ---
>
> You basically just moved the QEMU migration enums, which as a patch on
> its own doesn't make much sense; the other migration bits that can be
> made hypervisor-agnostic would have to either follow or be part of the
> same patch.

Yes, the other bits that should be made hypervisor-agnostic will follow
this, but right now this change is required for making only the domain
jobs hypervisor-agnostic.

Thanks,
Prathamesh Chavan