[PATCH 0/3] src: use virDomainJobData

This series continues the rewrite of the job-handling functions, with more
to come in the future.

Kristina Hanicova (3):
  qemu: use generalized virDomainJobData instead of qemuDomainJobInfo
  qemu: make separate function for setting statsType of privateData
  libxl: use virDomainJobData instead of virDomainJobInfo

 src/hypervisor/domain_job.c      |  78 +++++++++++
 src/hypervisor/domain_job.h      |  72 ++++++++++
 src/hypervisor/meson.build       |   1 +
 src/libvirt_private.syms         |   7 +
 src/libxl/libxl_domain.c         |  10 +-
 src/libxl/libxl_domain.h         |   3 +-
 src/libxl/libxl_driver.c         |  14 +-
 src/qemu/qemu_backup.c           |  40 +++---
 src/qemu/qemu_backup.h           |   4 +-
 src/qemu/qemu_domain.c           |   8 ++
 src/qemu/qemu_domain.h           |   3 +
 src/qemu/qemu_domainjob.c        | 227 +++++++++++++++----------------
 src/qemu/qemu_domainjob.h        |  54 ++------
 src/qemu/qemu_driver.c           | 109 ++++++++-------
 src/qemu/qemu_migration.c        | 187 +++++++++++++------------
 src/qemu/qemu_migration.h        |   4 +-
 src/qemu/qemu_migration_cookie.c |  60 ++++----
 src/qemu/qemu_migration_cookie.h |   2 +-
 src/qemu/qemu_process.c          |  23 ++--
 src/qemu/qemu_snapshot.c         |   3 +-
 20 files changed, 539 insertions(+), 370 deletions(-)
 create mode 100644 src/hypervisor/domain_job.c
 create mode 100644 src/hypervisor/domain_job.h

-- 
2.34.1

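[Editorial note, not part of the series: for illustration only, the sketch
below shows how a hypervisor driver is expected to plug its private
statistics into the new generic API, based on the declarations that patch 1
adds in src/hypervisor/domain_job.h. The "foo" driver, its
fooDomainJobDataPrivate struct and its callbacks are made-up names for the
example; the qemu patch registers the equivalent
qemuJobDataPrivateDataCallbacks in the same way.]

    #include <string.h>
    #include <glib.h>

    #include "domain_job.h"

    /* Hypothetical driver-private job statistics. */
    typedef struct _fooDomainJobDataPrivate fooDomainJobDataPrivate;
    struct _fooDomainJobDataPrivate {
        unsigned long long bytesProcessed;
    };

    static void *
    fooJobDataAllocPrivateData(void)
    {
        return g_new0(fooDomainJobDataPrivate, 1);
    }

    static void *
    fooJobDataCopyPrivateData(void *data)
    {
        fooDomainJobDataPrivate *ret = g_new0(fooDomainJobDataPrivate, 1);

        memcpy(ret, data, sizeof(*ret));
        return ret;
    }

    static void
    fooJobDataFreePrivateData(void *data)
    {
        g_free(data);
    }

    static virDomainJobDataPrivateDataCallbacks fooJobDataPrivateDataCallbacks = {
        .allocPrivateData = fooJobDataAllocPrivateData,
        .copyPrivateData = fooJobDataCopyPrivateData,
        .freePrivateData = fooJobDataFreePrivateData,
    };

    static void
    fooDomainTrackJob(void)
    {
        /* Init allocates privateData through the registered callback;
         * g_autoptr cleanup calls virDomainJobDataFree on scope exit. */
        g_autoptr(virDomainJobData) job =
            virDomainJobDataInit(&fooJobDataPrivateDataCallbacks);
        fooDomainJobDataPrivate *priv = job->privateData;

        job->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
        priv->bytesProcessed = 42;

        /* Keep a snapshot of the finished job, as qemu does for job.completed. */
        virDomainJobData *completed = virDomainJobDataCopy(job);
        completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
        completed->jobType = virDomainJobStatusToType(completed->status);
        virDomainJobDataFree(completed);
    }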
This patch includes:
* introducing new files: src/hypervisor/domain_job.c and
  src/hypervisor/domain_job.h
* a new struct virDomainJobData, which is almost the same as
  qemuDomainJobInfo - the only differences are moving the qemu-specific
  job stats into qemuDomainJobDataPrivate and adding jobType (possibly
  more attributes in the future if needed)
* moving qemuDomainJobStatus to domain_job.h and renaming it to
  virDomainJobStatus
* moving and renaming qemuDomainJobStatusToType
* adding the callback struct virDomainJobDataPrivateDataCallbacks,
  which takes care of allocation, copying and freeing of the private
  data of virDomainJobData
* adding functions implementing virDomainJobDataPrivateDataCallbacks
  for the qemu hypervisor
* adding 'public' (shared between the different hypervisors) functions
  taking care of init, copy and free of virDomainJobData
* renaming every occurrence of qemuDomainJobInfo *info to
  virDomainJobData *data

Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
---
 src/hypervisor/domain_job.c      |  78 +++++++++++
 src/hypervisor/domain_job.h      |  72 ++++++++++
 src/hypervisor/meson.build       |   1 +
 src/libvirt_private.syms         |   7 +
 src/qemu/qemu_backup.c           |  40 +++---
 src/qemu/qemu_backup.h           |   4 +-
 src/qemu/qemu_domainjob.c        | 227 +++++++++++++++----------------
 src/qemu/qemu_domainjob.h        |  54 ++------
 src/qemu/qemu_driver.c           | 111 ++++++++-------
 src/qemu/qemu_migration.c        | 188 +++++++++++++------------
 src/qemu/qemu_migration.h        |   4 +-
 src/qemu/qemu_migration_cookie.c |  60 ++++----
 src/qemu/qemu_migration_cookie.h |   2 +-
 src/qemu/qemu_process.c          |  24 ++--
 src/qemu/qemu_snapshot.c         |   4 +-
 15 files changed, 517 insertions(+), 359 deletions(-)
 create mode 100644 src/hypervisor/domain_job.c
 create mode 100644 src/hypervisor/domain_job.h

diff --git a/src/hypervisor/domain_job.c b/src/hypervisor/domain_job.c
new file mode 100644
index 0000000000..daccbe4a69
--- /dev/null
+++ b/src/hypervisor/domain_job.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2022 Red Hat, Inc.
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#include <config.h>
+#include <string.h>
+
+#include "domain_job.h"
+
+
+virDomainJobData *
+virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb)
+{
+    virDomainJobData *ret = g_new0(virDomainJobData, 1);
+
+    ret->privateDataCb = cb;
+
+    if (ret->privateDataCb && ret->privateDataCb->allocPrivateData)
+        ret->privateData = ret->privateDataCb->allocPrivateData();
+
+    return ret;
+}
+
+virDomainJobData *
+virDomainJobDataCopy(virDomainJobData *data)
+{
+    virDomainJobData *ret = g_new0(virDomainJobData, 1);
+
+    memcpy(ret, data, sizeof(*data));
+
+    if (ret->privateDataCb && data->privateDataCb->copyPrivateData)
+        ret->privateData = data->privateDataCb->copyPrivateData(data->privateData);
+
+    ret->errmsg = g_strdup(data->errmsg);
+
+    return ret;
+}
+
+void
+virDomainJobDataFree(virDomainJobData *data)
+{
+    if (!data)
+        return;
+
+    if (data->privateDataCb && data->privateDataCb->freePrivateData)
+        data->privateDataCb->freePrivateData(data->privateData);
+
+    g_free(data->errmsg);
+    g_free(data);
+}
+
+virDomainJobType
+virDomainJobStatusToType(virDomainJobStatus status)
+{
+    switch (status) {
+    case VIR_DOMAIN_JOB_STATUS_NONE:
+        break;
+
+    case VIR_DOMAIN_JOB_STATUS_ACTIVE:
+    case VIR_DOMAIN_JOB_STATUS_MIGRATING:
+    case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
+    case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
+    case VIR_DOMAIN_JOB_STATUS_PAUSED:
+        return VIR_DOMAIN_JOB_UNBOUNDED;
+
+    case VIR_DOMAIN_JOB_STATUS_COMPLETED:
+        return VIR_DOMAIN_JOB_COMPLETED;
+
+    case VIR_DOMAIN_JOB_STATUS_FAILED:
+        return VIR_DOMAIN_JOB_FAILED;
+
+    case VIR_DOMAIN_JOB_STATUS_CANCELED:
+        return VIR_DOMAIN_JOB_CANCELLED;
+    }
+
+    return VIR_DOMAIN_JOB_NONE;
+}
diff --git a/src/hypervisor/domain_job.h b/src/hypervisor/domain_job.h
new file mode 100644
index 0000000000..257ef067e4
--- /dev/null
+++ b/src/hypervisor/domain_job.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2022 Red Hat, Inc.
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#pragma once
+
+#include "internal.h"
+
+typedef enum {
+    VIR_DOMAIN_JOB_STATUS_NONE = 0,
+    VIR_DOMAIN_JOB_STATUS_ACTIVE,
+    VIR_DOMAIN_JOB_STATUS_MIGRATING,
+    VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED,
+    VIR_DOMAIN_JOB_STATUS_PAUSED,
+    VIR_DOMAIN_JOB_STATUS_POSTCOPY,
+    VIR_DOMAIN_JOB_STATUS_COMPLETED,
+    VIR_DOMAIN_JOB_STATUS_FAILED,
+    VIR_DOMAIN_JOB_STATUS_CANCELED,
+} virDomainJobStatus;
+
+typedef void *(*virDomainJobDataPrivateDataAlloc) (void);
+typedef void *(*virDomainJobDataPrivateDataCopy) (void *);
+typedef void (*virDomainJobDataPrivateDataFree) (void *);
+
+typedef struct _virDomainJobDataPrivateDataCallbacks virDomainJobDataPrivateDataCallbacks;
+struct _virDomainJobDataPrivateDataCallbacks {
+    virDomainJobDataPrivateDataAlloc allocPrivateData;
+    virDomainJobDataPrivateDataCopy copyPrivateData;
+    virDomainJobDataPrivateDataFree freePrivateData;
+};
+
+typedef struct _virDomainJobData virDomainJobData;
+struct _virDomainJobData {
+    virDomainJobType jobType;
+
+    virDomainJobStatus status;
+    virDomainJobOperation operation;
+    unsigned long long started; /* When the async job started */
+    unsigned long long stopped; /* When the domain's CPUs were stopped */
+    unsigned long long sent; /* When the source sent status info to the
+                                destination (only for migrations). */
+    unsigned long long received; /* When the destination host received status
+                                    info from the source (migrations only). */
+    /* Computed values */
+    unsigned long long timeElapsed;
+    long long timeDelta; /* delta = received - sent, i.e., the difference between
+                            the source and the destination time plus the time
+                            between the end of Perform phase on the source and
+                            the beginning of Finish phase on the destination. */
+    bool timeDeltaSet;
+
+    char *errmsg; /* optional error message for failed completed jobs */
+
+    void *privateData; /* private data of hypervisors */
+    virDomainJobDataPrivateDataCallbacks *privateDataCb; /* callbacks of private data, hypervisor based */
+};
+
+
+virDomainJobData *
+virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb);
+
+void
+virDomainJobDataFree(virDomainJobData *data);
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(virDomainJobData, virDomainJobDataFree);
+
+virDomainJobData *
+virDomainJobDataCopy(virDomainJobData *data);
+
+virDomainJobType
+virDomainJobStatusToType(virDomainJobStatus status);
diff --git a/src/hypervisor/meson.build b/src/hypervisor/meson.build
index 70801c0820..ec11ec0cd8 100644
--- a/src/hypervisor/meson.build
+++ b/src/hypervisor/meson.build
@@ -3,6 +3,7 @@ hypervisor_sources = [
   'domain_driver.c',
   'virclosecallbacks.c',
   'virhostdev.c',
+  'domain_job.c',
 ]
 
 stateful_driver_source_files += files(hypervisor_sources)
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index ba3462d849..d648059e16 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1565,6 +1565,13 @@ virDomainDriverParseBlkioDeviceStr;
 virDomainDriverSetupPersistentDefBlkioParams;
 
 
+# hypervisor/domain_job.h
+virDomainJobDataCopy;
+virDomainJobDataFree;
+virDomainJobDataInit;
+virDomainJobStatusToType;
+
+
 # hypervisor/virclosecallbacks.h
 virCloseCallbacksGet;
 virCloseCallbacksGetConn;
diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c
index 304a0d5a4f..081c4d023f 100644
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -555,7 +555,7 @@ qemuBackupBeginPullExportDisks(virDomainObj *vm,
 
 void
 qemuBackupJobTerminate(virDomainObj *vm,
-                       qemuDomainJobStatus jobstatus)
+                       virDomainJobStatus jobstatus)
 
 {
     qemuDomainObjPrivate *priv = vm->privateData;
@@ -583,7 +583,7 @@ qemuBackupJobTerminate(virDomainObj *vm,
         !(priv->backup->apiFlags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL) &&
         (priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL ||
          (priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH &&
-          jobstatus != QEMU_DOMAIN_JOB_STATUS_COMPLETED))) {
+          jobstatus != VIR_DOMAIN_JOB_STATUS_COMPLETED))) {
         uid_t uid;
         gid_t gid;
 
@@ -600,15 +600,19 @@ qemuBackupJobTerminate(virDomainObj *vm,
     }
 
     if (priv->job.current) {
+        qemuDomainJobDataPrivate *privData = NULL;
+
         qemuDomainJobInfoUpdateTime(priv->job.current);
 
-        g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
-        priv->job.completed = qemuDomainJobInfoCopy(priv->job.current);
+        g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
+        priv->job.completed = virDomainJobDataCopy(priv->job.current);
+
+        privData = priv->job.completed->privateData;
 
-        priv->job.completed->stats.backup.total = priv->backup->push_total;
-        priv->job.completed->stats.backup.transferred = priv->backup->push_transferred;
-        priv->job.completed->stats.backup.tmp_used = priv->backup->pull_tmp_used;
-        priv->job.completed->stats.backup.tmp_total = priv->backup->pull_tmp_total;
+        privData->stats.backup.total = priv->backup->push_total;
+        privData->stats.backup.transferred = priv->backup->push_transferred;
+        privData->stats.backup.tmp_used = priv->backup->pull_tmp_used;
+        privData->stats.backup.tmp_total = priv->backup->pull_tmp_total;
priv->job.completed->status = jobstatus; priv->job.completed->errmsg = g_strdup(priv->backup->errmsg); @@ -687,7 +691,7 @@ qemuBackupJobCancelBlockjobs(virDomainObj *vm, } if (terminatebackup && !has_active) - qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED); + qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED); } @@ -742,6 +746,7 @@ qemuBackupBegin(virDomainObj *vm, unsigned int flags) { qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privData = priv->job.current->privateData; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); g_autoptr(virDomainBackupDef) def = NULL; g_autofree char *suffix = NULL; @@ -795,7 +800,7 @@ qemuBackupBegin(virDomainObj *vm, qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_MODIFY))); - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + privData->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", @@ -985,7 +990,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm, bool has_cancelling = false; bool has_cancelled = false; bool has_failed = false; - qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + virDomainJobStatus jobstatus = VIR_DOMAIN_JOB_STATUS_COMPLETED; virDomainBackupDef *backup = priv->backup; size_t i; @@ -1082,9 +1087,9 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm, /* all sub-jobs have stopped */ if (has_failed) - jobstatus = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobstatus = VIR_DOMAIN_JOB_STATUS_FAILED; else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH) - jobstatus = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobstatus = VIR_DOMAIN_JOB_STATUS_CANCELED; qemuBackupJobTerminate(vm, jobstatus); } @@ -1135,9 +1140,10 @@ qemuBackupGetJobInfoStatsUpdateOne(virDomainObj *vm, int qemuBackupGetJobInfoStats(virQEMUDriver *driver, virDomainObj *vm, - qemuDomainJobInfo *jobInfo) + virDomainJobData *jobData) { - qemuDomainBackupStats *stats = &jobInfo->stats.backup; + qemuDomainJobDataPrivate *privJob = jobData->privateData; + qemuDomainBackupStats *stats = &privJob->stats.backup; qemuDomainObjPrivate *priv = vm->privateData; qemuMonitorJobInfo **blockjobs = NULL; size_t nblockjobs = 0; @@ -1151,10 +1157,10 @@ qemuBackupGetJobInfoStats(virQEMUDriver *driver, return -1; } - if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) + if (qemuDomainJobInfoUpdateTime(jobData) < 0) return -1; - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobData->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; qemuDomainObjEnterMonitor(driver, vm); diff --git a/src/qemu/qemu_backup.h b/src/qemu/qemu_backup.h index ebb3154516..de4dff9357 100644 --- a/src/qemu/qemu_backup.h +++ b/src/qemu/qemu_backup.h @@ -45,12 +45,12 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm, void qemuBackupJobTerminate(virDomainObj *vm, - qemuDomainJobStatus jobstatus); + virDomainJobStatus jobstatus); int qemuBackupGetJobInfoStats(virQEMUDriver *driver, virDomainObj *vm, - qemuDomainJobInfo *jobInfo); + virDomainJobData *jobData); /* exported for testing */ int diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 1ecde5af86..1d9bec2cfd 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c @@ -63,6 +63,38 @@ VIR_ENUM_IMPL(qemuDomainAsyncJob, "backup", ); +static void * +qemuJobDataAllocPrivateData(void) +{ + return g_new0(qemuDomainJobDataPrivate, 1); +} + + +static void * +qemuJobDataCopyPrivateData(void *data) +{ + qemuDomainJobDataPrivate *ret = 
g_new0(qemuDomainJobDataPrivate, 1); + + memcpy(ret, data, sizeof(qemuDomainJobDataPrivate)); + + return ret; +} + + +static void +qemuJobDataFreePrivateData(void *data) +{ + g_free(data); +} + + +virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks = { + .allocPrivateData = qemuJobDataAllocPrivateData, + .copyPrivateData = qemuJobDataCopyPrivateData, + .freePrivateData = qemuJobDataFreePrivateData, +}; + + const char * qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, int phase G_GNUC_UNUSED) @@ -116,26 +148,6 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, } -void -qemuDomainJobInfoFree(qemuDomainJobInfo *info) -{ - g_free(info->errmsg); - g_free(info); -} - - -qemuDomainJobInfo * -qemuDomainJobInfoCopy(qemuDomainJobInfo *info) -{ - qemuDomainJobInfo *ret = g_new0(qemuDomainJobInfo, 1); - - memcpy(ret, info, sizeof(*info)); - - ret->errmsg = g_strdup(info->errmsg); - - return ret; -} - void qemuDomainEventEmitJobCompleted(virQEMUDriver *driver, virDomainObj *vm) @@ -149,7 +161,7 @@ qemuDomainEventEmitJobCompleted(virQEMUDriver *driver, if (!priv->job.completed) return; - if (qemuDomainJobInfoToParams(priv->job.completed, &type, + if (qemuDomainJobDataToParams(priv->job.completed, &type, ¶ms, &nparams) < 0) { VIR_WARN("Could not get stats for completed job; domain %s", vm->def->name); @@ -216,7 +228,7 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObj *job) job->mask = QEMU_JOB_DEFAULT_MASK; job->abortJob = false; VIR_FREE(job->error); - g_clear_pointer(&job->current, qemuDomainJobInfoFree); + g_clear_pointer(&job->current, virDomainJobDataFree); job->cb->resetJobPrivate(job->privateData); job->apiFlags = 0; } @@ -254,8 +266,8 @@ qemuDomainObjClearJob(qemuDomainJobObj *job) qemuDomainObjResetJob(job); qemuDomainObjResetAsyncJob(job); g_clear_pointer(&job->privateData, job->cb->freeJobPrivate); - g_clear_pointer(&job->current, qemuDomainJobInfoFree); - g_clear_pointer(&job->completed, qemuDomainJobInfoFree); + g_clear_pointer(&job->current, virDomainJobDataFree); + g_clear_pointer(&job->completed, virDomainJobDataFree); virCondDestroy(&job->cond); virCondDestroy(&job->asyncCond); } @@ -268,111 +280,87 @@ qemuDomainTrackJob(qemuDomainJob job) int -qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo) +qemuDomainJobInfoUpdateTime(virDomainJobData *jobData) { unsigned long long now; - if (!jobInfo->started) + if (!jobData->started) return 0; if (virTimeMillisNow(&now) < 0) return -1; - if (now < jobInfo->started) { + if (now < jobData->started) { VIR_WARN("Async job starts in the future"); - jobInfo->started = 0; + jobData->started = 0; return 0; } - jobInfo->timeElapsed = now - jobInfo->started; + jobData->timeElapsed = now - jobData->started; return 0; } int -qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo) +qemuDomainJobInfoUpdateDowntime(virDomainJobData *jobData) { unsigned long long now; + qemuDomainJobDataPrivate *priv = jobData->privateData; - if (!jobInfo->stopped) + if (!jobData->stopped) return 0; if (virTimeMillisNow(&now) < 0) return -1; - if (now < jobInfo->stopped) { + if (now < jobData->stopped) { VIR_WARN("Guest's CPUs stopped in the future"); - jobInfo->stopped = 0; + jobData->stopped = 0; return 0; } - jobInfo->stats.mig.downtime = now - jobInfo->stopped; - jobInfo->stats.mig.downtime_set = true; + priv->stats.mig.downtime = now - jobData->stopped; + priv->stats.mig.downtime_set = true; return 0; } -static virDomainJobType -qemuDomainJobStatusToType(qemuDomainJobStatus status) -{ - switch (status) { - case 
QEMU_DOMAIN_JOB_STATUS_NONE: - break; - - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: - return VIR_DOMAIN_JOB_UNBOUNDED; - - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - return VIR_DOMAIN_JOB_COMPLETED; - - case QEMU_DOMAIN_JOB_STATUS_FAILED: - return VIR_DOMAIN_JOB_FAILED; - - case QEMU_DOMAIN_JOB_STATUS_CANCELED: - return VIR_DOMAIN_JOB_CANCELLED; - } - - return VIR_DOMAIN_JOB_NONE; -} int -qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo, +qemuDomainJobInfoToInfo(virDomainJobData *jobData, virDomainJobInfoPtr info) { - info->type = qemuDomainJobStatusToType(jobInfo->status); - info->timeElapsed = jobInfo->timeElapsed; + qemuDomainJobDataPrivate *priv = jobData->privateData; + info->type = virDomainJobStatusToType(jobData->status); + info->timeElapsed = jobData->timeElapsed; - switch (jobInfo->statsType) { + switch (priv->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; - info->fileTotal = jobInfo->stats.mig.disk_total + - jobInfo->mirrorStats.total; - info->fileRemaining = jobInfo->stats.mig.disk_remaining + - (jobInfo->mirrorStats.total - - jobInfo->mirrorStats.transferred); - info->fileProcessed = jobInfo->stats.mig.disk_transferred + - jobInfo->mirrorStats.transferred; + info->memTotal = priv->stats.mig.ram_total; + info->memRemaining = priv->stats.mig.ram_remaining; + info->memProcessed = priv->stats.mig.ram_transferred; + info->fileTotal = priv->stats.mig.disk_total + + priv->mirrorStats.total; + info->fileRemaining = priv->stats.mig.disk_remaining + + (priv->mirrorStats.total - + priv->mirrorStats.transferred); + info->fileProcessed = priv->stats.mig.disk_transferred + + priv->mirrorStats.transferred; break; case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - info->memTotal = jobInfo->stats.mig.ram_total; - info->memRemaining = jobInfo->stats.mig.ram_remaining; - info->memProcessed = jobInfo->stats.mig.ram_transferred; + info->memTotal = priv->stats.mig.ram_total; + info->memRemaining = priv->stats.mig.ram_remaining; + info->memProcessed = priv->stats.mig.ram_transferred; break; case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - info->memTotal = jobInfo->stats.dump.total; - info->memProcessed = jobInfo->stats.dump.completed; + info->memTotal = priv->stats.dump.total; + info->memProcessed = priv->stats.dump.completed; info->memRemaining = info->memTotal - info->memProcessed; break; case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - info->fileTotal = jobInfo->stats.backup.total; - info->fileProcessed = jobInfo->stats.backup.transferred; + info->fileTotal = priv->stats.backup.total; + info->fileProcessed = priv->stats.backup.transferred; info->fileRemaining = info->fileTotal - info->fileProcessed; break; @@ -389,13 +377,14 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo, static int -qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, +qemuDomainMigrationJobDataToParams(virDomainJobData *jobData, int *type, virTypedParameterPtr *params, int *nparams) { - qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; - qemuDomainMirrorStats *mirrorStats = &jobInfo->mirrorStats; + qemuDomainJobDataPrivate *priv = jobData->privateData; + qemuMonitorMigrationStats *stats = &priv->stats.mig; + qemuDomainMirrorStats *mirrorStats = &priv->mirrorStats; virTypedParameterPtr par = NULL; 
int maxpar = 0; int npar = 0; @@ -404,19 +393,19 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, if (virTypedParamsAddInt(&par, &npar, &maxpar, VIR_DOMAIN_JOB_OPERATION, - jobInfo->operation) < 0) + jobData->operation) < 0) goto error; if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) + jobData->timeElapsed) < 0) goto error; - if (jobInfo->timeDeltaSet && - jobInfo->timeElapsed > jobInfo->timeDelta && + if (jobData->timeDeltaSet && + jobData->timeElapsed > jobData->timeDelta && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_TIME_ELAPSED_NET, - jobInfo->timeElapsed - jobInfo->timeDelta) < 0) + jobData->timeElapsed - jobData->timeDelta) < 0) goto error; if (stats->downtime_set && @@ -426,11 +415,11 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, goto error; if (stats->downtime_set && - jobInfo->timeDeltaSet && - stats->downtime > jobInfo->timeDelta && + jobData->timeDeltaSet && + stats->downtime > jobData->timeDelta && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DOWNTIME_NET, - stats->downtime - jobInfo->timeDelta) < 0) + stats->downtime - jobData->timeDelta) < 0) goto error; if (stats->setup_time_set && @@ -505,7 +494,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, /* The remaining stats are disk, mirror, or migration specific * so if this is a SAVEDUMP, we can just skip them */ - if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) + if (priv->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) goto done; if (virTypedParamsAddULLong(&par, &npar, &maxpar, @@ -554,7 +543,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, goto error; done: - *type = qemuDomainJobStatusToType(jobInfo->status); + *type = virDomainJobStatusToType(jobData->status); *params = par; *nparams = npar; return 0; @@ -566,24 +555,25 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, static int -qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo, +qemuDomainDumpJobDataToParams(virDomainJobData *jobData, int *type, virTypedParameterPtr *params, int *nparams) { - qemuMonitorDumpStats *stats = &jobInfo->stats.dump; + qemuDomainJobDataPrivate *priv = jobData->privateData; + qemuMonitorDumpStats *stats = &priv->stats.dump; virTypedParameterPtr par = NULL; int maxpar = 0; int npar = 0; if (virTypedParamsAddInt(&par, &npar, &maxpar, VIR_DOMAIN_JOB_OPERATION, - jobInfo->operation) < 0) + jobData->operation) < 0) goto error; if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) + jobData->timeElapsed) < 0) goto error; if (virTypedParamsAddULLong(&par, &npar, &maxpar, @@ -597,7 +587,7 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo, stats->total - stats->completed) < 0) goto error; - *type = qemuDomainJobStatusToType(jobInfo->status); + *type = virDomainJobStatusToType(jobData->status); *params = par; *nparams = npar; return 0; @@ -609,19 +599,20 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo, static int -qemuDomainBackupJobInfoToParams(qemuDomainJobInfo *jobInfo, +qemuDomainBackupJobDataToParams(virDomainJobData *jobData, int *type, virTypedParameterPtr *params, int *nparams) { - qemuDomainBackupStats *stats = &jobInfo->stats.backup; + qemuDomainJobDataPrivate *priv = jobData->privateData; + qemuDomainBackupStats *stats = &priv->stats.backup; g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); - if (virTypedParamListAddInt(par, jobInfo->operation, + if 
(virTypedParamListAddInt(par, jobData->operation, VIR_DOMAIN_JOB_OPERATION) < 0) return -1; - if (virTypedParamListAddULLong(par, jobInfo->timeElapsed, + if (virTypedParamListAddULLong(par, jobData->timeElapsed, VIR_DOMAIN_JOB_TIME_ELAPSED) < 0) return -1; @@ -649,38 +640,40 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfo *jobInfo, return -1; } - if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && + if (jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE && virTypedParamListAddBoolean(par, - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, + jobData->status == VIR_DOMAIN_JOB_STATUS_COMPLETED, VIR_DOMAIN_JOB_SUCCESS) < 0) return -1; - if (jobInfo->errmsg && - virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) + if (jobData->errmsg && + virTypedParamListAddString(par, jobData->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) return -1; *nparams = virTypedParamListStealParams(par, params); - *type = qemuDomainJobStatusToType(jobInfo->status); + *type = virDomainJobStatusToType(jobData->status); return 0; } int -qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo, +qemuDomainJobDataToParams(virDomainJobData *jobData, int *type, virTypedParameterPtr *params, int *nparams) { - switch (jobInfo->statsType) { + qemuDomainJobDataPrivate *priv = jobData->privateData; + + switch (priv->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); + return qemuDomainMigrationJobDataToParams(jobData, type, params, nparams); case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams); + return qemuDomainDumpJobDataToParams(jobData, type, params, nparams); case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); + return qemuDomainBackupJobDataToParams(jobData, type, params, nparams); case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: virReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -688,7 +681,7 @@ qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo, break; default: - virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); + virReportEnumRangeError(qemuDomainJobStatsType, priv->statsType); break; } @@ -895,8 +888,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver, qemuDomainAsyncJobTypeToString(asyncJob), obj, obj->def->name); qemuDomainObjResetAsyncJob(&priv->job); - priv->job.current = g_new0(qemuDomainJobInfo, 1); - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks); + priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; priv->job.asyncJob = asyncJob; priv->job.asyncOwner = virThreadSelfID(); priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet()); diff --git a/src/qemu/qemu_domainjob.h b/src/qemu/qemu_domainjob.h index f904bd49c2..5de82ee016 100644 --- a/src/qemu/qemu_domainjob.h +++ b/src/qemu/qemu_domainjob.h @@ -20,6 +20,7 @@ #include <glib-object.h> #include "qemu_monitor.h" +#include "domain_job.h" #define JOB_MASK(job) (job == 0 ? 
0 : 1 << (job - 1)) #define QEMU_JOB_DEFAULT_MASK \ @@ -79,17 +80,6 @@ typedef enum { } qemuDomainAsyncJob; VIR_ENUM_DECL(qemuDomainAsyncJob); -typedef enum { - QEMU_DOMAIN_JOB_STATUS_NONE = 0, - QEMU_DOMAIN_JOB_STATUS_ACTIVE, - QEMU_DOMAIN_JOB_STATUS_MIGRATING, - QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED, - QEMU_DOMAIN_JOB_STATUS_PAUSED, - QEMU_DOMAIN_JOB_STATUS_POSTCOPY, - QEMU_DOMAIN_JOB_STATUS_COMPLETED, - QEMU_DOMAIN_JOB_STATUS_FAILED, - QEMU_DOMAIN_JOB_STATUS_CANCELED, -} qemuDomainJobStatus; typedef enum { QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0, @@ -114,24 +104,8 @@ struct _qemuDomainBackupStats { unsigned long long tmp_total; }; -typedef struct _qemuDomainJobInfo qemuDomainJobInfo; -struct _qemuDomainJobInfo { - qemuDomainJobStatus status; - virDomainJobOperation operation; - unsigned long long started; /* When the async job started */ - unsigned long long stopped; /* When the domain's CPUs were stopped */ - unsigned long long sent; /* When the source sent status info to the - destination (only for migrations). */ - unsigned long long received; /* When the destination host received status - info from the source (migrations only). */ - /* Computed values */ - unsigned long long timeElapsed; - long long timeDelta; /* delta = received - sent, i.e., the difference - between the source and the destination time plus - the time between the end of Perform phase on the - source and the beginning of Finish phase on the - destination. */ - bool timeDeltaSet; +typedef struct _qemuDomainJobDataPrivate qemuDomainJobDataPrivate; +struct _qemuDomainJobDataPrivate { /* Raw values from QEMU */ qemuDomainJobStatsType statsType; union { @@ -140,17 +114,9 @@ struct _qemuDomainJobInfo { qemuDomainBackupStats backup; } stats; qemuDomainMirrorStats mirrorStats; - - char *errmsg; /* optional error message for failed completed jobs */ }; -void -qemuDomainJobInfoFree(qemuDomainJobInfo *info); - -G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree); - -qemuDomainJobInfo * -qemuDomainJobInfoCopy(qemuDomainJobInfo *info); +extern virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks; typedef struct _qemuDomainJobObj qemuDomainJobObj; @@ -198,8 +164,8 @@ struct _qemuDomainJobObj { unsigned long long asyncStarted; /* When the current async job started */ int phase; /* Job phase (mainly for migrations) */ unsigned long long mask; /* Jobs allowed during async job */ - qemuDomainJobInfo *current; /* async job progress data */ - qemuDomainJobInfo *completed; /* statistics data of a recently completed job */ + virDomainJobData *current; /* async job progress data */ + virDomainJobData *completed; /* statistics data of a recently completed job */ bool abortJob; /* abort of the job requested */ char *error; /* job event completion error */ unsigned long apiFlags; /* flags passed to the API which started the async job */ @@ -256,14 +222,14 @@ void qemuDomainObjDiscardAsyncJob(virQEMUDriver *driver, virDomainObj *obj); void qemuDomainObjReleaseAsyncJob(virDomainObj *obj); -int qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo) +int qemuDomainJobInfoUpdateTime(virDomainJobData *jobData) ATTRIBUTE_NONNULL(1); -int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo) +int qemuDomainJobInfoUpdateDowntime(virDomainJobData *jobData) ATTRIBUTE_NONNULL(1); -int qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo, +int qemuDomainJobInfoToInfo(virDomainJobData *jobData, virDomainJobInfoPtr info) ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); -int qemuDomainJobInfoToParams(qemuDomainJobInfo 
*jobInfo, +int qemuDomainJobDataToParams(virDomainJobData *jobData, int *type, virTypedParameterPtr *params, int *nparams) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 83cc7a04ea..800db34a4b 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2630,6 +2630,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver, int ret = -1; virObjectEvent *event = NULL; qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData; virQEMUSaveData *data = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL; @@ -2646,7 +2647,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver, goto endjob; } - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Pause */ if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { @@ -2939,6 +2940,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm) { qemuDomainObjPrivate *priv = vm->privateData; qemuDomainJobPrivate *jobPriv = priv->job.privateData; + qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData; VIR_DEBUG("Waiting for dump completion"); while (!jobPriv->dumpCompleted && !priv->job.abortJob) { @@ -2946,7 +2948,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm) return -1; } - if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { + if (privJobCurrent->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { if (priv->job.error) virReportError(VIR_ERR_OPERATION_FAILED, _("memory-only dump failed: %s"), @@ -2985,10 +2987,13 @@ qemuDumpToFd(virQEMUDriver *driver, if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) return -1; - if (detach) - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; - else - g_clear_pointer(&priv->job.current, qemuDomainJobInfoFree); + if (detach) { + qemuDomainJobDataPrivate *privStats = priv->job.current->privateData; + + privStats->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; + } else { + g_clear_pointer(&priv->job.current, virDomainJobDataFree); + } if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) return -1; @@ -3123,6 +3128,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, virQEMUDriver *driver = dom->conn->privateData; virDomainObj *vm; qemuDomainObjPrivate *priv = NULL; + qemuDomainJobDataPrivate *privJobCurrent = NULL; bool resume = false, paused = false; int ret = -1; virObjectEvent *event = NULL; @@ -3147,7 +3153,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, goto endjob; priv = vm->privateData; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + privJobCurrent = priv->job.current->privateData; + privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* Migrate will always stop the VM, so the resume condition is independent of whether the stop command is issued. 
*/ @@ -12409,28 +12416,30 @@ qemuConnectBaselineHypervisorCPU(virConnectPtr conn, static int qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver, virDomainObj *vm, - qemuDomainJobInfo *jobInfo) + virDomainJobData *jobData) { qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privStats = jobData->privateData; + bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); - if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE || + jobData->status == VIR_DOMAIN_JOB_STATUS_MIGRATING || + jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED || + jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) { if (events && - jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && + jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE && qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE, - jobInfo, NULL) < 0) + jobData, NULL) < 0) return -1; - if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && - jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION && + if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE && + privStats->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION && qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE, - jobInfo) < 0) + jobData) < 0) return -1; - if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) + if (qemuDomainJobInfoUpdateTime(jobData) < 0) return -1; } @@ -12441,9 +12450,10 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver, static int qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver, virDomainObj *vm, - qemuDomainJobInfo *jobInfo) + virDomainJobData *jobData) { qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privJob = jobData->privateData; qemuMonitorDumpStats stats = { 0 }; int rc; @@ -12456,33 +12466,33 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver, if (rc < 0) return -1; - jobInfo->stats.dump = stats; + privJob->stats.dump = stats; - if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) + if (qemuDomainJobInfoUpdateTime(jobData) < 0) return -1; - switch (jobInfo->stats.dump.status) { + switch (privJob->stats.dump.status) { case QEMU_MONITOR_DUMP_STATUS_NONE: case QEMU_MONITOR_DUMP_STATUS_FAILED: case QEMU_MONITOR_DUMP_STATUS_LAST: virReportError(VIR_ERR_OPERATION_FAILED, _("dump query failed, status=%d"), - jobInfo->stats.dump.status); + privJob->stats.dump.status); return -1; break; case QEMU_MONITOR_DUMP_STATUS_ACTIVE: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + jobData->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; VIR_DEBUG("dump active, bytes written='%llu' remaining='%llu'", - jobInfo->stats.dump.completed, - jobInfo->stats.dump.total - - jobInfo->stats.dump.completed); + privJob->stats.dump.completed, + privJob->stats.dump.total - + privJob->stats.dump.completed); break; case QEMU_MONITOR_DUMP_STATUS_COMPLETED: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; VIR_DEBUG("dump completed, bytes written='%llu'", - jobInfo->stats.dump.completed); + privJob->stats.dump.completed); break; } @@ -12494,16 +12504,17 @@ static int qemuDomainGetJobStatsInternal(virQEMUDriver *driver, virDomainObj *vm, bool completed, - qemuDomainJobInfo **jobInfo) + virDomainJobData **jobData) { qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privStats = NULL; int ret = -1; - 
*jobInfo = NULL; + *jobData = NULL; if (completed) { if (priv->job.completed && !priv->job.current) - *jobInfo = qemuDomainJobInfoCopy(priv->job.completed); + *jobData = virDomainJobDataCopy(priv->job.completed); return 0; } @@ -12525,22 +12536,24 @@ qemuDomainGetJobStatsInternal(virQEMUDriver *driver, ret = 0; goto cleanup; } - *jobInfo = qemuDomainJobInfoCopy(priv->job.current); + *jobData = virDomainJobDataCopy(priv->job.current); + + privStats = (*jobData)->privateData; - switch ((*jobInfo)->statsType) { + switch (privStats->statsType) { case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: - if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobInfo) < 0) + if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobData) < 0) goto cleanup; break; case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: - if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobInfo) < 0) + if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobData) < 0) goto cleanup; break; case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: - if (qemuBackupGetJobInfoStats(driver, vm, *jobInfo) < 0) + if (qemuBackupGetJobInfoStats(driver, vm, *jobData) < 0) goto cleanup; break; @@ -12561,7 +12574,7 @@ qemuDomainGetJobInfo(virDomainPtr dom, virDomainJobInfoPtr info) { virQEMUDriver *driver = dom->conn->privateData; - g_autoptr(qemuDomainJobInfo) jobInfo = NULL; + g_autoptr(virDomainJobData) jobData = NULL; virDomainObj *vm; int ret = -1; @@ -12573,16 +12586,16 @@ qemuDomainGetJobInfo(virDomainPtr dom, if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0) goto cleanup; - if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobInfo) < 0) + if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobData) < 0) goto cleanup; - if (!jobInfo || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) { + if (!jobData || + jobData->status == VIR_DOMAIN_JOB_STATUS_NONE) { ret = 0; goto cleanup; } - ret = qemuDomainJobInfoToInfo(jobInfo, info); + ret = qemuDomainJobInfoToInfo(jobData, info); cleanup: virDomainObjEndAPI(&vm); @@ -12600,7 +12613,7 @@ qemuDomainGetJobStats(virDomainPtr dom, virQEMUDriver *driver = dom->conn->privateData; virDomainObj *vm; qemuDomainObjPrivate *priv; - g_autoptr(qemuDomainJobInfo) jobInfo = NULL; + g_autoptr(virDomainJobData) jobData = NULL; bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED); int ret = -1; @@ -12614,11 +12627,11 @@ qemuDomainGetJobStats(virDomainPtr dom, goto cleanup; priv = vm->privateData; - if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) + if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobData) < 0) goto cleanup; - if (!jobInfo || - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) { + if (!jobData || + jobData->status == VIR_DOMAIN_JOB_STATUS_NONE) { *type = VIR_DOMAIN_JOB_NONE; *params = NULL; *nparams = 0; @@ -12626,10 +12639,10 @@ qemuDomainGetJobStats(virDomainPtr dom, goto cleanup; } - ret = qemuDomainJobInfoToParams(jobInfo, type, params, nparams); + ret = qemuDomainJobDataToParams(jobData, type, params, nparams); if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED)) - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&priv->job.completed, virDomainJobDataFree); cleanup: virDomainObjEndAPI(&vm); @@ -12695,7 +12708,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) break; case QEMU_ASYNC_JOB_MIGRATION_OUT: - if ((priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || + if ((priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY || (virDomainObjGetState(vm, &reason) == 
VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY))) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 2635ef1162..7957b79fc2 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1200,7 +1200,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver, return -1; if (priv->job.abortJob) { - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); @@ -1623,35 +1623,37 @@ qemuMigrationSrcWaitForSpice(virDomainObj *vm) static void -qemuMigrationUpdateJobType(qemuDomainJobInfo *jobInfo) +qemuMigrationUpdateJobType(virDomainJobData *jobData) { - switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) { + qemuDomainJobDataPrivate *priv = jobData->privateData; + + switch ((qemuMonitorMigrationStatus) priv->stats.mig.status) { case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY; + jobData->status = VIR_DOMAIN_JOB_STATUS_POSTCOPY; break; case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED; + jobData->status = VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED; break; case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE; + jobData->status = VIR_DOMAIN_JOB_STATUS_NONE; break; case QEMU_MONITOR_MIGRATION_STATUS_ERROR: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED; break; case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + jobData->status = VIR_DOMAIN_JOB_STATUS_CANCELED; break; case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED; + jobData->status = VIR_DOMAIN_JOB_STATUS_PAUSED; break; case QEMU_MONITOR_MIGRATION_STATUS_DEVICE: - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; + jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING; break; case QEMU_MONITOR_MIGRATION_STATUS_SETUP: @@ -1668,11 +1670,12 @@ int qemuMigrationAnyFetchStats(virQEMUDriver *driver, virDomainObj *vm, qemuDomainAsyncJob asyncJob, - qemuDomainJobInfo *jobInfo, + virDomainJobData *jobData, char **error) { qemuDomainObjPrivate *priv = vm->privateData; qemuMonitorMigrationStats stats; + qemuDomainJobDataPrivate *privJob = jobData->privateData; int rv; if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) @@ -1684,7 +1687,7 @@ qemuMigrationAnyFetchStats(virQEMUDriver *driver, if (rv < 0) return -1; - jobInfo->stats.mig = stats; + privJob->stats.mig = stats; return 0; } @@ -1725,41 +1728,42 @@ qemuMigrationJobCheckStatus(virQEMUDriver *driver, qemuDomainAsyncJob asyncJob) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobInfo *jobInfo = priv->job.current; + virDomainJobData *jobData = priv->job.current; + qemuDomainJobDataPrivate *privJob = jobData->privateData; g_autofree char *error = NULL; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); if (!events || - jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { - if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0) + privJob->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { + if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, &error) < 0) return -1; } - qemuMigrationUpdateJobType(jobInfo); + 
qemuMigrationUpdateJobType(jobData); - switch (jobInfo->status) { - case QEMU_DOMAIN_JOB_STATUS_NONE: + switch (jobData->status) { + case VIR_DOMAIN_JOB_STATUS_NONE: virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), qemuMigrationJobName(vm), _("is not active")); return -1; - case QEMU_DOMAIN_JOB_STATUS_FAILED: + case VIR_DOMAIN_JOB_STATUS_FAILED: virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), qemuMigrationJobName(vm), error ? error : _("unexpectedly failed")); return -1; - case QEMU_DOMAIN_JOB_STATUS_CANCELED: + case VIR_DOMAIN_JOB_STATUS_CANCELED: virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuMigrationJobName(vm), _("canceled by client")); return -1; - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: + case VIR_DOMAIN_JOB_STATUS_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_ACTIVE: + case VIR_DOMAIN_JOB_STATUS_MIGRATING: + case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_POSTCOPY: + case VIR_DOMAIN_JOB_STATUS_PAUSED: break; } @@ -1790,7 +1794,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver, unsigned int flags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobInfo *jobInfo = priv->job.current; + virDomainJobData *jobData = priv->job.current; int pauseReason; if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0) @@ -1820,7 +1824,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver, * wait again for the real end of the migration. */ if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER && - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { + jobData->status == VIR_DOMAIN_JOB_STATUS_PAUSED) { VIR_DEBUG("Migration paused before switchover"); return 1; } @@ -1830,38 +1834,38 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver, * will continue waiting until the migrate state changes to completed. */ if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY && - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) { VIR_DEBUG("Migration switched to post-copy"); return 1; } - if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) + if (jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED) return 1; else return 0; error: - switch (jobInfo->status) { - case QEMU_DOMAIN_JOB_STATUS_MIGRATING: - case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: - case QEMU_DOMAIN_JOB_STATUS_PAUSED: + switch (jobData->status) { + case VIR_DOMAIN_JOB_STATUS_MIGRATING: + case VIR_DOMAIN_JOB_STATUS_POSTCOPY: + case VIR_DOMAIN_JOB_STATUS_PAUSED: /* The migration was aborted by us rather than QEMU itself. */ - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED; return -2; - case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED: /* Something failed after QEMU already finished the migration. */ - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED; return -1; - case QEMU_DOMAIN_JOB_STATUS_FAILED: - case QEMU_DOMAIN_JOB_STATUS_CANCELED: + case VIR_DOMAIN_JOB_STATUS_FAILED: + case VIR_DOMAIN_JOB_STATUS_CANCELED: /* QEMU aborted the migration. */ return -1; - case QEMU_DOMAIN_JOB_STATUS_ACTIVE: - case QEMU_DOMAIN_JOB_STATUS_COMPLETED: - case QEMU_DOMAIN_JOB_STATUS_NONE: + case VIR_DOMAIN_JOB_STATUS_ACTIVE: + case VIR_DOMAIN_JOB_STATUS_COMPLETED: + case VIR_DOMAIN_JOB_STATUS_NONE: /* Impossible. 
*/ break; } @@ -1881,11 +1885,11 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver, unsigned int flags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobInfo *jobInfo = priv->job.current; + virDomainJobData *jobData = priv->job.current; bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); int rv; - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; + jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING; while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob, dconn, flags)) != 1) { @@ -1895,7 +1899,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver, if (events) { if (virDomainObjWait(vm) < 0) { if (virDomainObjIsActive(vm)) - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED; return -2; } } else { @@ -1909,17 +1913,17 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver, } if (events) - ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL)); + ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, NULL)); - qemuDomainJobInfoUpdateTime(jobInfo); - qemuDomainJobInfoUpdateDowntime(jobInfo); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); - priv->job.completed = qemuDomainJobInfoCopy(jobInfo); - priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + qemuDomainJobInfoUpdateTime(jobData); + qemuDomainJobInfoUpdateDowntime(jobData); + g_clear_pointer(&priv->job.completed, virDomainJobDataFree); + priv->job.completed = virDomainJobDataCopy(jobData); + priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && - jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED) + jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; return 0; } @@ -3385,7 +3389,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver, virObjectEvent *event; qemuDomainObjPrivate *priv = vm->privateData; qemuDomainJobPrivate *jobPriv = priv->job.privateData; - qemuDomainJobInfo *jobInfo = NULL; + virDomainJobData *jobData = NULL; VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "flags=0x%x, retcode=%d", @@ -3405,13 +3409,15 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver, return -1; if (retcode == 0) - jobInfo = priv->job.completed; + jobData = priv->job.completed; else - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&priv->job.completed, virDomainJobDataFree); /* Update times with the values sent by the destination daemon */ - if (mig->jobInfo && jobInfo) { + if (mig->jobData && jobData) { int reason; + qemuDomainJobDataPrivate *privJob = jobData->privateData; + qemuDomainJobDataPrivate *privMigJob = mig->jobData->privateData; /* We need to refresh migration statistics after a completed post-copy * migration since priv->job.completed contains obsolete data from the @@ -3420,14 +3426,14 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver, if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && reason == VIR_DOMAIN_PAUSED_POSTCOPY && qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, - jobInfo, NULL) < 0) + jobData, NULL) < 0) VIR_WARN("Could not refresh migration statistics"); - qemuDomainJobInfoUpdateTime(jobInfo); - jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet; - jobInfo->timeDelta = mig->jobInfo->timeDelta; - jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set; - 
jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime; + qemuDomainJobInfoUpdateTime(jobData); + jobData->timeDeltaSet = mig->jobData->timeDeltaSet; + jobData->timeDelta = mig->jobData->timeDelta; + privJob->stats.mig.downtime_set = privMigJob->stats.mig.downtime_set; + privJob->stats.mig.downtime = privMigJob->stats.mig.downtime; } if (flags & VIR_MIGRATE_OFFLINE) @@ -4197,7 +4203,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver, /* explicitly do this *after* we entered the monitor, * as this is a critical section so we are guaranteed * priv->job.abortJob will not change */ - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED; virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), qemuDomainAsyncJobTypeToString(priv->job.asyncJob), _("canceled by client")); @@ -4312,7 +4318,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver, * resume it now once we finished all block jobs and wait for the real * end of the migration. */ - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { + if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) { if (qemuMigrationSrcContinue(driver, vm, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) @@ -4373,7 +4379,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver, if (virDomainObjIsActive(vm)) { if (cancel && - priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && + priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED && qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { qemuMonitorMigrateCancel(priv->mon); @@ -4388,8 +4394,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver, qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT); - if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED) + priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED; } if (iothread) @@ -5625,7 +5631,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver, unsigned short port; unsigned long long timeReceived = 0; virObjectEvent *event; - qemuDomainJobInfo *jobInfo = NULL; + virDomainJobData *jobData = NULL; bool inPostCopy = false; bool doKill = true; @@ -5649,7 +5655,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver, : QEMU_MIGRATION_PHASE_FINISH2); qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup); - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&priv->job.completed, virDomainJobDataFree); cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK | QEMU_MIGRATION_COOKIE_STATS | @@ -5741,7 +5747,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver, goto endjob; } - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) inPostCopy = true; if (!(flags & VIR_MIGRATE_PAUSED)) { @@ -5777,17 +5783,17 @@ qemuMigrationDstFinish(virQEMUDriver *driver, doKill = false; } - if (mig->jobInfo) { - jobInfo = mig->jobInfo; - mig->jobInfo = NULL; + if (mig->jobData) { + jobData = mig->jobData; + mig->jobData = NULL; - if (jobInfo->sent && timeReceived) { - jobInfo->timeDelta = timeReceived - jobInfo->sent; - jobInfo->received = timeReceived; - jobInfo->timeDeltaSet = true; + if (jobData->sent && timeReceived) { + jobData->timeDelta = timeReceived - jobData->sent; + jobData->received = timeReceived; + jobData->timeDeltaSet = true; } - qemuDomainJobInfoUpdateTime(jobInfo); 
- qemuDomainJobInfoUpdateDowntime(jobInfo); + qemuDomainJobInfoUpdateTime(jobData); + qemuDomainJobInfoUpdateDowntime(jobData); } if (inPostCopy) { @@ -5852,10 +5858,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver, } if (dom) { - if (jobInfo) { - priv->job.completed = g_steal_pointer(&jobInfo); - priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; - priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + if (jobData) { + qemuDomainJobDataPrivate *privJob = jobData->privateData; + + priv->job.completed = g_steal_pointer(&jobData); + priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; + privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; } if (qemuMigrationCookieFormat(mig, driver, vm, @@ -5868,7 +5876,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver, * is obsolete anyway. */ if (inPostCopy) - g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); + g_clear_pointer(&priv->job.completed, virDomainJobDataFree); } qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, @@ -5879,7 +5887,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver, qemuDomainRemoveInactiveJob(driver, vm); cleanup: - g_clear_pointer(&jobInfo, qemuDomainJobInfoFree); + g_clear_pointer(&jobData, virDomainJobDataFree); virPortAllocatorRelease(port); if (priv->mon) qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL); @@ -6097,6 +6105,7 @@ qemuMigrationJobStart(virQEMUDriver *driver, unsigned long apiFlags) { qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privJob = priv->job.current->privateData; virDomainJobOperation op; unsigned long long mask; @@ -6113,7 +6122,7 @@ qemuMigrationJobStart(virQEMUDriver *driver, if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) return -1; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; qemuDomainObjSetAsyncJobMask(vm, mask); return 0; @@ -6233,13 +6242,14 @@ int qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver, virDomainObj *vm, qemuDomainAsyncJob asyncJob, - qemuDomainJobInfo *jobInfo) + virDomainJobData *jobData) { size_t i; qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privJob = jobData->privateData; bool nbd = false; g_autoptr(GHashTable) blockinfo = NULL; - qemuDomainMirrorStats *stats = &jobInfo->mirrorStats; + qemuDomainMirrorStats *stats = &privJob->mirrorStats; for (i = 0; i < vm->def->ndisks; i++) { virDomainDiskDef *disk = vm->def->disks[i]; diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index b233358a51..6b169f73c7 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -221,7 +221,7 @@ int qemuMigrationAnyFetchStats(virQEMUDriver *driver, virDomainObj *vm, qemuDomainAsyncJob asyncJob, - qemuDomainJobInfo *jobInfo, + virDomainJobData *jobData, char **error); int @@ -258,4 +258,4 @@ int qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver, virDomainObj *vm, qemuDomainAsyncJob asyncJob, - qemuDomainJobInfo *jobInfo); + virDomainJobData *jobData); diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c index bffab7c13d..d1654b59c5 100644 --- a/src/qemu/qemu_migration_cookie.c +++ b/src/qemu/qemu_migration_cookie.c @@ -166,7 +166,7 @@ qemuMigrationCookieFree(qemuMigrationCookie *mig) g_free(mig->name); g_free(mig->lockState); g_free(mig->lockDriver); - g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree); + g_clear_pointer(&mig->jobData, virDomainJobDataFree); virCPUDefFree(mig->cpu); 
qemuMigrationCookieCapsFree(mig->caps); if (mig->blockDirtyBitmaps) @@ -531,8 +531,8 @@ qemuMigrationCookieAddStatistics(qemuMigrationCookie *mig, if (!priv->job.completed) return 0; - g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree); - mig->jobInfo = qemuDomainJobInfoCopy(priv->job.completed); + g_clear_pointer(&mig->jobData, virDomainJobDataFree); + mig->jobData = virDomainJobDataCopy(priv->job.completed); mig->flags |= QEMU_MIGRATION_COOKIE_STATS; @@ -632,22 +632,23 @@ qemuMigrationCookieNetworkXMLFormat(virBuffer *buf, static void qemuMigrationCookieStatisticsXMLFormat(virBuffer *buf, - qemuDomainJobInfo *jobInfo) + virDomainJobData *jobData) { - qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; + qemuDomainJobDataPrivate *priv = jobData->privateData; + qemuMonitorMigrationStats *stats = &priv->stats.mig; virBufferAddLit(buf, "<statistics>\n"); virBufferAdjustIndent(buf, 2); - virBufferAsprintf(buf, "<started>%llu</started>\n", jobInfo->started); - virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobInfo->stopped); - virBufferAsprintf(buf, "<sent>%llu</sent>\n", jobInfo->sent); - if (jobInfo->timeDeltaSet) - virBufferAsprintf(buf, "<delta>%lld</delta>\n", jobInfo->timeDelta); + virBufferAsprintf(buf, "<started>%llu</started>\n", jobData->started); + virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobData->stopped); + virBufferAsprintf(buf, "<sent>%llu</sent>\n", jobData->sent); + if (jobData->timeDeltaSet) + virBufferAsprintf(buf, "<delta>%lld</delta>\n", jobData->timeDelta); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed); + jobData->timeElapsed); if (stats->downtime_set) virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DOWNTIME, @@ -884,8 +885,8 @@ qemuMigrationCookieXMLFormat(virQEMUDriver *driver, if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd) qemuMigrationCookieNBDXMLFormat(mig->nbd, buf); - if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo) - qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobInfo); + if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData) + qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobData); if (mig->flags & QEMU_MIGRATION_COOKIE_CPU && mig->cpu) virCPUDefFormatBufFull(buf, mig->cpu, NULL); @@ -1031,29 +1032,30 @@ qemuMigrationCookieNBDXMLParse(xmlXPathContextPtr ctxt) } -static qemuDomainJobInfo * +static virDomainJobData * qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) { - qemuDomainJobInfo *jobInfo = NULL; + virDomainJobData *jobData = NULL; qemuMonitorMigrationStats *stats; + qemuDomainJobDataPrivate *priv = NULL; VIR_XPATH_NODE_AUTORESTORE(ctxt) if (!(ctxt->node = virXPathNode("./statistics", ctxt))) return NULL; - jobInfo = g_new0(qemuDomainJobInfo, 1); + jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks); + priv = jobData->privateData; + stats = &priv->stats.mig; + jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; - stats = &jobInfo->stats.mig; - jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; - - virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started); - virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped); - virXPathULongLong("string(./sent[1])", ctxt, &jobInfo->sent); - if (virXPathLongLong("string(./delta[1])", ctxt, &jobInfo->timeDelta) == 0) - jobInfo->timeDeltaSet = true; + virXPathULongLong("string(./started[1])", ctxt, &jobData->started); + virXPathULongLong("string(./stopped[1])", ctxt, &jobData->stopped); + virXPathULongLong("string(./sent[1])", ctxt, 
&jobData->sent); + if (virXPathLongLong("string(./delta[1])", ctxt, &jobData->timeDelta) == 0) + jobData->timeDeltaSet = true; virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_ELAPSED "[1])", - ctxt, &jobInfo->timeElapsed); + ctxt, &jobData->timeElapsed); if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])", ctxt, &stats->downtime) == 0) @@ -1113,7 +1115,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) virXPathInt("string(./" VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE "[1])", ctxt, &stats->cpu_throttle_percentage); - return jobInfo; + return jobData; } @@ -1385,7 +1387,7 @@ qemuMigrationCookieXMLParse(qemuMigrationCookie *mig, if (flags & QEMU_MIGRATION_COOKIE_STATS && virXPathBoolean("boolean(./statistics)", ctxt) && - (!(mig->jobInfo = qemuMigrationCookieStatisticsXMLParse(ctxt)))) + (!(mig->jobData = qemuMigrationCookieStatisticsXMLParse(ctxt)))) return -1; if (flags & QEMU_MIGRATION_COOKIE_CPU && @@ -1546,8 +1548,8 @@ qemuMigrationCookieParse(virQEMUDriver *driver, } } - if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo && priv->job.current) - mig->jobInfo->operation = priv->job.current->operation; + if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && priv->job.current) + mig->jobData->operation = priv->job.current->operation; return g_steal_pointer(&mig); } diff --git a/src/qemu/qemu_migration_cookie.h b/src/qemu/qemu_migration_cookie.h index 1726e5f2da..d9e1d949a8 100644 --- a/src/qemu/qemu_migration_cookie.h +++ b/src/qemu/qemu_migration_cookie.h @@ -162,7 +162,7 @@ struct _qemuMigrationCookie { qemuMigrationCookieNBD *nbd; /* If (flags & QEMU_MIGRATION_COOKIE_STATS) */ - qemuDomainJobInfo *jobInfo; + virDomainJobData *jobData; /* If flags & QEMU_MIGRATION_COOKIE_CPU */ virCPUDef *cpu; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 7ff4dc1835..fb7a04139a 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -650,7 +650,7 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED, if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING && !priv->pausedShutdown) { if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) { - if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) reason = VIR_DOMAIN_PAUSED_POSTCOPY; else reason = VIR_DOMAIN_PAUSED_MIGRATION; @@ -1544,6 +1544,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED, void *opaque) { qemuDomainObjPrivate *priv; + qemuDomainJobDataPrivate *privJob = NULL; virQEMUDriver *driver = opaque; virObjectEvent *event = NULL; int reason; @@ -1560,7 +1561,9 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED, goto cleanup; } - priv->job.current->stats.mig.status = status; + privJob = priv->job.current->privateData; + + privJob->stats.mig.status = status; virDomainObjBroadcast(vm); if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY && @@ -1622,6 +1625,7 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED, { qemuDomainObjPrivate *priv; qemuDomainJobPrivate *jobPriv; + qemuDomainJobDataPrivate *privJobCurrent = NULL; virObjectLock(vm); @@ -1630,18 +1634,19 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED, priv = vm->privateData; jobPriv = priv->job.privateData; + privJobCurrent = priv->job.current->privateData; if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job"); goto cleanup; } jobPriv->dumpCompleted = true; - priv->job.current->stats.dump = *stats; + 
privJobCurrent->stats.dump = *stats; priv->job.error = g_strdup(error); /* Force error if extracting the DUMP_COMPLETED status failed */ if (!error && status < 0) { priv->job.error = g_strdup(virGetLastErrorMessage()); - priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; + privJobCurrent->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; } virDomainObjBroadcast(vm); @@ -3592,6 +3597,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver, unsigned int *stopFlags) { qemuDomainObjPrivate *priv = vm->privateData; + qemuDomainJobDataPrivate *privDataJobCurrent = NULL; virDomainState state; int reason; unsigned long long now; @@ -3659,10 +3665,12 @@ qemuProcessRecoverJob(virQEMUDriver *driver, /* We reset the job parameters for backup so that the job will look * active. This is possible because we are able to recover the state * of blockjobs and also the backup job allows all sub-job types */ - priv->job.current = g_new0(qemuDomainJobInfo, 1); + priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks); + privDataJobCurrent = priv->job.current->privateData; + priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; - priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + privDataJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; priv->job.current->started = now; break; @@ -8280,7 +8288,7 @@ void qemuProcessStop(virQEMUDriver *driver, /* clean up a possible backup job */ if (priv->backup) - qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED); + qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED); /* Do this explicitly after vm->pid is reset so that security drivers don't * try to enter the domain's namespace which is non-existent by now as qemu diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 1887c70708..6f9aedace7 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -1414,11 +1414,13 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver, /* do the memory snapshot if necessary */ if (memory) { + qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData; + /* check if migration is possible */ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; /* allow the migration job to be cancelled or the domain to be paused */ qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | -- 2.34.1

On Thu, Jan 20, 2022 at 17:59:48 +0100, Kristina Hanicova wrote:
This patch includes: * introducing new files: src/hypervisor/domain_job.c and src/hypervisor/domain_job.h * new struct virDomainJobData, which is almost the same as qemuDomainJobInfo - the only differences are moving qemu specific job stats into the qemuDomainJobDataPrivate and adding jobType (possibly more attributes in the future if needed). * moving qemuDomainJobStatus to the domain_job.h and renaming it as virDomainJobStatus * moving and renaming qemuDomainJobStatusToType * adding callback struct virDomainJobDataPrivateDataCallbacks taking care of allocation, copying and freeing of private data of virDomainJobData * adding functions for virDomainJobDataPrivateDataCallbacks for qemu hypervisor * adding 'public' (public between the different hypervisors) functions taking care of init, copy, free of virDomainJobData * renaming every occurrence of qemuDomainJobInfo *info to virDomainJobData *data
Looks like the patch is doing a bit too much at once, but splitting it now would be a nightmare, so take it mostly as a reminder for future patches :-)
Signed-off-by: Kristina Hanicova <khanicov@redhat.com> --- src/hypervisor/domain_job.c | 78 +++++++++++ src/hypervisor/domain_job.h | 72 ++++++++++ src/hypervisor/meson.build | 1 + src/libvirt_private.syms | 7 + src/qemu/qemu_backup.c | 40 +++--- src/qemu/qemu_backup.h | 4 +- src/qemu/qemu_domainjob.c | 227 +++++++++++++++---------------- src/qemu/qemu_domainjob.h | 54 ++------ src/qemu/qemu_driver.c | 111 ++++++++------- src/qemu/qemu_migration.c | 188 +++++++++++++------------ src/qemu/qemu_migration.h | 4 +- src/qemu/qemu_migration_cookie.c | 60 ++++---- src/qemu/qemu_migration_cookie.h | 2 +- src/qemu/qemu_process.c | 24 ++-- src/qemu/qemu_snapshot.c | 4 +- 15 files changed, 517 insertions(+), 359 deletions(-) create mode 100644 src/hypervisor/domain_job.c create mode 100644 src/hypervisor/domain_job.h
diff --git a/src/hypervisor/domain_job.c b/src/hypervisor/domain_job.c new file mode 100644 index 0000000000..daccbe4a69 --- /dev/null +++ b/src/hypervisor/domain_job.c @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2022 Red Hat, Inc. + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#include <config.h> +#include <string.h> + +#include "domain_job.h" + + +virDomainJobData * +virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb) +{ + virDomainJobData *ret = g_new0(virDomainJobData, 1); + + ret->privateDataCb = cb; + + if (ret->privateDataCb && ret->privateDataCb->allocPrivateData) + ret->privateData = ret->privateDataCb->allocPrivateData();
I think we should make this all or nothing. That is, if a caller passes non-NULL cb, all three callbacks should be non-NULL as it doesn't really make sense to set only some of them. And since I guess we don't want these functions to return an error, I suggest we just crash. In other words:

    if (ret->privateDataCb)
        ret->privateData = ret->privateDataCb->allocPrivateData();
+ + return ret; +} + +virDomainJobData * +virDomainJobDataCopy(virDomainJobData *data) +{ + virDomainJobData *ret = g_new0(virDomainJobData, 1); + + memcpy(ret, data, sizeof(*data)); + + if (ret->privateDataCb && data->privateDataCb->copyPrivateData) + ret->privateData = data->privateDataCb->copyPrivateData(data->privateData);
The same as above. Moreover, having the alloc callback without the copy one would result in the same pointer being stored in both structures, which is definitely not something we want to allow. Better crash deterministically now than debug memory corruption caused by freeing the same pointer twice.
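To illustrate, with all three callbacks treated as mandatory whenever privateDataCb is set, the copy helper could shrink to something like this (untested sketch, reusing the names from the patch):

    virDomainJobData *
    virDomainJobDataCopy(virDomainJobData *data)
    {
        virDomainJobData *ret = g_new0(virDomainJobData, 1);

        memcpy(ret, data, sizeof(*data));

        /* copyPrivateData is required once privateDataCb is set, so call it
         * unconditionally and crash early rather than silently sharing the
         * same privateData pointer between the two structures */
        if (data->privateDataCb)
            ret->privateData = data->privateDataCb->copyPrivateData(data->privateData);

        ret->errmsg = g_strdup(data->errmsg);

        return ret;
    }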
+ + ret->errmsg = g_strdup(data->errmsg); + + return ret; +} + +void +virDomainJobDataFree(virDomainJobData *data) +{ + if (!data) + return; + + if (data->privateDataCb && data->privateDataCb->freePrivateData) + data->privateDataCb->freePrivateData(data->privateData);
Again, I don't think we want to risk hidden memory leaks if someone does not provide the free callback.
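And similarly for the free path (again just a sketch):

    void
    virDomainJobDataFree(virDomainJobData *data)
    {
        if (!data)
            return;

        /* freePrivateData is mandatory once privateDataCb is set */
        if (data->privateDataCb)
            data->privateDataCb->freePrivateData(data->privateData);

        g_free(data->errmsg);
        g_free(data);
    }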
+ + g_free(data->errmsg); + g_free(data); +} + ... diff --git a/src/hypervisor/domain_job.h b/src/hypervisor/domain_job.h new file mode 100644 index 0000000000..257ef067e4 --- /dev/null +++ b/src/hypervisor/domain_job.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2022 Red Hat, Inc. + * SPDX-License-Identifier: LGPL-2.1-or-later + */ + +#pragma once + +#include "internal.h" + +typedef enum { + VIR_DOMAIN_JOB_STATUS_NONE = 0, + VIR_DOMAIN_JOB_STATUS_ACTIVE, + VIR_DOMAIN_JOB_STATUS_MIGRATING, + VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED, + VIR_DOMAIN_JOB_STATUS_PAUSED, + VIR_DOMAIN_JOB_STATUS_POSTCOPY, + VIR_DOMAIN_JOB_STATUS_COMPLETED, + VIR_DOMAIN_JOB_STATUS_FAILED, + VIR_DOMAIN_JOB_STATUS_CANCELED, +} virDomainJobStatus; + +typedef void *(*virDomainJobDataPrivateDataAlloc) (void); +typedef void *(*virDomainJobDataPrivateDataCopy) (void *); +typedef void (*virDomainJobDataPrivateDataFree) (void *); + +typedef struct _virDomainJobDataPrivateDataCallbacks virDomainJobDataPrivateDataCallbacks; +struct _virDomainJobDataPrivateDataCallbacks { + virDomainJobDataPrivateDataAlloc allocPrivateData; + virDomainJobDataPrivateDataCopy copyPrivateData; + virDomainJobDataPrivateDataFree freePrivateData;
The PrivateData suffix of these callbacks seems redundant given they are all included in the PrivateDataCallbacks structure. You could drop that suffix and use just plain alloc, copy, and free. Not a big deal, though.
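For illustration only (purely cosmetic, the function typedefs would stay as they are):

    struct _virDomainJobDataPrivateDataCallbacks {
        virDomainJobDataPrivateDataAlloc alloc;  /* allocate hypervisor-specific private data */
        virDomainJobDataPrivateDataCopy copy;    /* deep-copy it */
        virDomainJobDataPrivateDataFree free;    /* release it */
    };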
+}; + ... diff --git a/src/qemu/qemu_domainjob.c b/src/qemu/qemu_domainjob.c index 1ecde5af86..1d9bec2cfd 100644 --- a/src/qemu/qemu_domainjob.c +++ b/src/qemu/qemu_domainjob.c ... @@ -268,111 +280,87 @@ qemuDomainTrackJob(qemuDomainJob job)
int -qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo) +qemuDomainJobInfoUpdateTime(virDomainJobData *jobData)
I guess this function could (in a separate patch) go to domain_job.c as virDomainJobDataUpdateTime.
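Something like this in domain_job.h (hypothetical prototype only, the body below would stay the same):

    /* hypothetical generalized helper exported from src/hypervisor/domain_job.h */
    int virDomainJobDataUpdateTime(virDomainJobData *jobData);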
{ unsigned long long now;
- if (!jobInfo->started) + if (!jobData->started) return 0;
if (virTimeMillisNow(&now) < 0) return -1;
- if (now < jobInfo->started) { + if (now < jobData->started) { VIR_WARN("Async job starts in the future"); - jobInfo->started = 0; + jobData->started = 0; return 0; }
- jobInfo->timeElapsed = now - jobInfo->started; + jobData->timeElapsed = now - jobData->started; return 0; }
int -qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo) +qemuDomainJobInfoUpdateDowntime(virDomainJobData *jobData)
Shouldn't this function be called qemuDomainJobDataUpdateDowntime now?
{ unsigned long long now; + qemuDomainJobDataPrivate *priv = jobData->privateData;
- if (!jobInfo->stopped) + if (!jobData->stopped) return 0;
if (virTimeMillisNow(&now) < 0) return -1;
- if (now < jobInfo->stopped) { + if (now < jobData->stopped) { VIR_WARN("Guest's CPUs stopped in the future"); - jobInfo->stopped = 0; + jobData->stopped = 0; return 0; }
- jobInfo->stats.mig.downtime = now - jobInfo->stopped; - jobInfo->stats.mig.downtime_set = true; + priv->stats.mig.downtime = now - jobData->stopped; + priv->stats.mig.downtime_set = true; return 0; } ...
int -qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo, +qemuDomainJobInfoToInfo(virDomainJobData *jobData, virDomainJobInfoPtr info)
qemuDomainJobDataToInfo perhaps? ... Looks fine except for a few nits. Jirka

We only need to set statsType in almost every case of setting something from private data, so it seems unnecessary to pull privateData out of current / completed job for just this one thing every time. I think this patch keeps the code cleaner without variables used just once. Signed-off-by: Kristina Hanicova <khanicov@redhat.com> --- src/qemu/qemu_backup.c | 4 ++-- src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_domain.h | 3 +++ src/qemu/qemu_driver.c | 14 ++++++-------- src/qemu/qemu_migration.c | 9 ++++----- src/qemu/qemu_process.c | 5 ++--- src/qemu/qemu_snapshot.c | 5 ++--- 7 files changed, 27 insertions(+), 21 deletions(-) diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index 081c4d023f..23d3f44dd8 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -746,7 +746,6 @@ qemuBackupBegin(virDomainObj *vm, unsigned int flags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobDataPrivate *privData = priv->job.current->privateData; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); g_autoptr(virDomainBackupDef) def = NULL; g_autofree char *suffix = NULL; @@ -800,7 +799,8 @@ qemuBackupBegin(virDomainObj *vm, qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_MODIFY))); - privData->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP); if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index a8401bac30..62e67b438f 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -81,6 +81,14 @@ VIR_LOG_INIT("qemu.qemu_domain"); +void +qemuDomainJobPrivateSetStatsType(qemuDomainJobDataPrivate *privData, + qemuDomainJobStatsType type) +{ + privData->statsType = type; +} + + static void * qemuJobAllocPrivate(void) { diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index e5046367e3..7363ff6444 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -488,6 +488,9 @@ void qemuDomainObjStopWorker(virDomainObj *dom); virDomainObj *qemuDomainObjFromDomain(virDomainPtr domain); +void qemuDomainJobPrivateSetStatsType(qemuDomainJobDataPrivate *privData, + qemuDomainJobStatsType type); + qemuDomainSaveCookie *qemuDomainSaveCookieNew(virDomainObj *vm); void qemuDomainEventFlush(int timer, void *opaque); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 800db34a4b..7f7684f1cc 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2630,7 +2630,6 @@ qemuDomainSaveInternal(virQEMUDriver *driver, int ret = -1; virObjectEvent *event = NULL; qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData; virQEMUSaveData *data = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL; @@ -2647,7 +2646,8 @@ qemuDomainSaveInternal(virQEMUDriver *driver, goto endjob; } - privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP); /* Pause */ if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { @@ -2988,9 +2988,8 @@ qemuDumpToFd(virQEMUDriver *driver, return -1; if (detach) { - qemuDomainJobDataPrivate *privStats = priv->job.current->privateData; - - privStats->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + 
QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP); } else { g_clear_pointer(&priv->job.current, virDomainJobDataFree); } @@ -3128,7 +3127,6 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, virQEMUDriver *driver = dom->conn->privateData; virDomainObj *vm; qemuDomainObjPrivate *priv = NULL; - qemuDomainJobDataPrivate *privJobCurrent = NULL; bool resume = false, paused = false; int ret = -1; virObjectEvent *event = NULL; @@ -3153,8 +3151,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, goto endjob; priv = vm->privateData; - privJobCurrent = priv->job.current->privateData; - privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP); /* Migrate will always stop the VM, so the resume condition is independent of whether the stop command is issued. */ diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 7957b79fc2..9e4fd67e2a 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5859,11 +5859,10 @@ qemuMigrationDstFinish(virQEMUDriver *driver, if (dom) { if (jobData) { - qemuDomainJobDataPrivate *privJob = jobData->privateData; - priv->job.completed = g_steal_pointer(&jobData); priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; - privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + qemuDomainJobPrivateSetStatsType(jobData->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION); } if (qemuMigrationCookieFormat(mig, driver, vm, @@ -6105,7 +6104,6 @@ qemuMigrationJobStart(virQEMUDriver *driver, unsigned long apiFlags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobDataPrivate *privJob = priv->job.current->privateData; virDomainJobOperation op; unsigned long long mask; @@ -6122,7 +6120,8 @@ qemuMigrationJobStart(virQEMUDriver *driver, if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) return -1; - privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION); qemuDomainObjSetAsyncJobMask(vm, mask); return 0; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index fb7a04139a..39872e6c12 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -3597,7 +3597,6 @@ qemuProcessRecoverJob(virQEMUDriver *driver, unsigned int *stopFlags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobDataPrivate *privDataJobCurrent = NULL; virDomainState state; int reason; unsigned long long now; @@ -3666,10 +3665,10 @@ qemuProcessRecoverJob(virQEMUDriver *driver, * active. 
This is possible because we are able to recover the state * of blockjobs and also the backup job allows all sub-job types */ priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks); - privDataJobCurrent = priv->job.current->privateData; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP); priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; - privDataJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; priv->job.current->started = now; break; diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c index 6f9aedace7..359a7a32ea 100644 --- a/src/qemu/qemu_snapshot.c +++ b/src/qemu/qemu_snapshot.c @@ -1414,13 +1414,12 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver, /* do the memory snapshot if necessary */ if (memory) { - qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData; - /* check if migration is possible */ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) goto cleanup; - privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP); /* allow the migration job to be cancelled or the domain to be paused */ qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | -- 2.34.1

On Thu, Jan 20, 2022 at 17:59:49 +0100, Kristina Hanicova wrote:
We only need to set statsType in almost every case of setting something from private data, so it seems unnecessary to pull privateData out of current / completed job for just this one thing every time. I think this patch keeps the code cleaner without variables used just once.
Signed-off-by: Kristina Hanicova <khanicov@redhat.com> --- src/qemu/qemu_backup.c | 4 ++-- src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_domain.h | 3 +++ src/qemu/qemu_driver.c | 14 ++++++-------- src/qemu/qemu_migration.c | 9 ++++----- src/qemu/qemu_process.c | 5 ++--- src/qemu/qemu_snapshot.c | 5 ++--- 7 files changed, 27 insertions(+), 21 deletions(-)
diff --git a/src/qemu/qemu_backup.c b/src/qemu/qemu_backup.c index 081c4d023f..23d3f44dd8 100644 --- a/src/qemu/qemu_backup.c +++ b/src/qemu/qemu_backup.c @@ -746,7 +746,6 @@ qemuBackupBegin(virDomainObj *vm, unsigned int flags) { qemuDomainObjPrivate *priv = vm->privateData; - qemuDomainJobDataPrivate *privData = priv->job.current->privateData; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); g_autoptr(virDomainBackupDef) def = NULL; g_autofree char *suffix = NULL; @@ -800,7 +799,8 @@ qemuBackupBegin(virDomainObj *vm, qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_MODIFY))); - privData->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; + qemuDomainJobPrivateSetStatsType(priv->job.current->privateData, + QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index a8401bac30..62e67b438f 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c
The new helper would fit slightly better in qemu_domainjob.c
@@ -81,6 +81,14 @@ VIR_LOG_INIT("qemu.qemu_domain");
+void +qemuDomainJobPrivateSetStatsType(qemuDomainJobDataPrivate *privData, + qemuDomainJobStatsType type)
Since we're doing this to minimize the repeated code pattern, I would go a little bit further and change the prototype to

    qemuDomainJobSetStatsType(virDomainJobData *jobData,
                              qemuDomainJobStatsType type)
+{
Having an extra

    qemuDomainJobDataPrivate *privData = jobData->privateData;

here is better than repeating the dereference for every single caller.
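I.e., the whole helper would then look something like this (untested sketch combining the two suggestions above):

    void
    qemuDomainJobSetStatsType(virDomainJobData *jobData,
                              qemuDomainJobStatsType type)
    {
        /* dereference the private data once here instead of in every caller */
        qemuDomainJobDataPrivate *privData = jobData->privateData;

        privData->statsType = type;
    }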
+ privData->statsType = type; +} + + static void * qemuJobAllocPrivate(void) { ...
Looks OK otherwise. Jirka

This transition will make it easier for me in order to generalize jobs in the future as they will always use virDomainJobData and virDomainJobInfo will be only used in the public api.. Signed-off-by: Kristina Hanicova <khanicov@redhat.com> --- src/libxl/libxl_domain.c | 10 +++++----- src/libxl/libxl_domain.h | 3 ++- src/libxl/libxl_driver.c | 14 +++++++++----- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/libxl/libxl_domain.c b/src/libxl/libxl_domain.c index feca60f7d2..8f2feb328b 100644 --- a/src/libxl/libxl_domain.c +++ b/src/libxl/libxl_domain.c @@ -60,7 +60,7 @@ libxlDomainObjInitJob(libxlDomainObjPrivate *priv) if (virCondInit(&priv->job.cond) < 0) return -1; - priv->job.current = g_new0(virDomainJobInfo, 1); + priv->job.current = virDomainJobDataInit(NULL); return 0; } @@ -78,7 +78,7 @@ static void libxlDomainObjFreeJob(libxlDomainObjPrivate *priv) { ignore_value(virCondDestroy(&priv->job.cond)); - VIR_FREE(priv->job.current); + virDomainJobDataFree(priv->job.current); } /* Give up waiting for mutex after 30 seconds */ @@ -119,7 +119,7 @@ libxlDomainObjBeginJob(libxlDriverPrivate *driver G_GNUC_UNUSED, priv->job.active = job; priv->job.owner = virThreadSelfID(); priv->job.started = now; - priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED; + priv->job.current->jobType = VIR_DOMAIN_JOB_UNBOUNDED; return 0; @@ -168,7 +168,7 @@ libxlDomainObjEndJob(libxlDriverPrivate *driver G_GNUC_UNUSED, int libxlDomainJobUpdateTime(struct libxlDomainJobObj *job) { - virDomainJobInfoPtr jobInfo = job->current; + virDomainJobData *jobData = job->current; unsigned long long now; if (!job->started) @@ -182,7 +182,7 @@ libxlDomainJobUpdateTime(struct libxlDomainJobObj *job) return 0; } - jobInfo->timeElapsed = now - job->started; + jobData->timeElapsed = now - job->started; return 0; } diff --git a/src/libxl/libxl_domain.h b/src/libxl/libxl_domain.h index 981bfc2bca..475e4a6933 100644 --- a/src/libxl/libxl_domain.h +++ b/src/libxl/libxl_domain.h @@ -26,6 +26,7 @@ #include "libxl_conf.h" #include "virchrdev.h" #include "virenum.h" +#include "domain_job.h" /* Only 1 job is allowed at any time * A job includes *all* libxl.so api, even those just querying @@ -46,7 +47,7 @@ struct libxlDomainJobObj { enum libxlDomainJob active; /* Currently running job */ int owner; /* Thread which set current job */ unsigned long long started; /* When the job started */ - virDomainJobInfoPtr current; /* Statistics for the current job */ + virDomainJobData *current; /* Statistics for the current job */ }; typedef struct _libxlDomainObjPrivate libxlDomainObjPrivate; diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c index 2d9385654c..03d542299b 100644 --- a/src/libxl/libxl_driver.c +++ b/src/libxl/libxl_driver.c @@ -5212,7 +5212,11 @@ libxlDomainGetJobInfo(virDomainPtr dom, if (libxlDomainJobUpdateTime(&priv->job) < 0) goto cleanup; - memcpy(info, priv->job.current, sizeof(virDomainJobInfo)); + /* setting only these two attributes is enough because libxl never sets + * anything else */ + memset(info, 0, sizeof(*info)); + info->type = priv->job.current->jobType; + info->timeElapsed = priv->job.current->timeElapsed; ret = 0; cleanup: @@ -5229,7 +5233,7 @@ libxlDomainGetJobStats(virDomainPtr dom, { libxlDomainObjPrivate *priv; virDomainObj *vm; - virDomainJobInfoPtr jobInfo; + virDomainJobData *jobData; int ret = -1; int maxparams = 0; @@ -5243,7 +5247,7 @@ libxlDomainGetJobStats(virDomainPtr dom, goto cleanup; priv = vm->privateData; - jobInfo = priv->job.current; + jobData = priv->job.current; 
if (!priv->job.active) { *type = VIR_DOMAIN_JOB_NONE; *params = NULL; @@ -5260,10 +5264,10 @@ libxlDomainGetJobStats(virDomainPtr dom, if (virTypedParamsAddULLong(params, nparams, &maxparams, VIR_DOMAIN_JOB_TIME_ELAPSED, - jobInfo->timeElapsed) < 0) + jobData->timeElapsed) < 0) goto cleanup; - *type = jobInfo->type; + *type = jobData->jobType; ret = 0; cleanup: -- 2.34.1

On Thu, Jan 20, 2022 at 17:59:50 +0100, Kristina Hanicova wrote:
This transition will make it easier for me in order to generalize
s/in order //
jobs in the future as they will always use virDomainJobData and virDomainJobInfo will be only used in the public api..
Signed-off-by: Kristina Hanicova <khanicov@redhat.com> --- src/libxl/libxl_domain.c | 10 +++++----- src/libxl/libxl_domain.h | 3 ++- src/libxl/libxl_driver.c | 14 +++++++++----- 3 files changed, 16 insertions(+), 11 deletions(-)
Reviewed-by: Jiri Denemark <jdenemar@redhat.com>