[libvirt] [PATCH 0/2] Report VIR_DOMAIN_JOB_OPERATION in job stats

As agreed in [1], we should enhance the job statistics returned by the
virDomainGetJobStats API and the VIR_DOMAIN_EVENT_ID_JOB_COMPLETED event with
a new typed parameter indicating which operation (migration, snapshot, ...)
the reported statistics belong to.

https://bugzilla.redhat.com/show_bug.cgi?id=1441563

[1] https://www.redhat.com/archives/libvir-list/2017-April/msg00540.html

Jiri Denemark (2):
  Add VIR_DOMAIN_JOB_OPERATION typed parameter
  qemu: Report VIR_DOMAIN_JOB_OPERATION

 include/libvirt/libvirt-domain.h | 25 +++++++++++++++++++++++++
 src/qemu/qemu_domain.c           | 16 +++++++++++++---
 src/qemu/qemu_domain.h           |  4 +++-
 src/qemu/qemu_driver.c           | 32 ++++++++++++++++++++++----------
 src/qemu/qemu_migration.c        | 19 ++++++++++++-------
 src/qemu/qemu_process.c          |  6 ++++--
 src/qemu/qemu_process.h          |  3 ++-
 tools/virsh-domain.c             | 29 +++++++++++++++++++++++++++++
 8 files changed, 110 insertions(+), 24 deletions(-)

-- 
2.12.2
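For illustration only (not part of the series), a minimal client-side sketch
of how the new parameter could be read via virDomainGetJobStats; the
connection URI and the domain name "demo" are placeholders:

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int type;
    int op = VIR_DOMAIN_JOB_OPERATION_UNKNOWN;

    /* Fetch statistics of the currently running job and look up the
     * new "operation" field. */
    if (dom &&
        virDomainGetJobStats(dom, &type, &params, &nparams, 0) == 0 &&
        virTypedParamsGetInt(params, nparams,
                             VIR_DOMAIN_JOB_OPERATION, &op) > 0)
        printf("job operation: %d\n", op);

    virTypedParamsFree(params, nparams);
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return 0;
}

Since virTypedParamsGetInt returns 1 only when the field is present, clients
talking to an older daemon that does not report the parameter simply keep the
UNKNOWN default.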

[libvirt] [PATCH 1/2] Add VIR_DOMAIN_JOB_OPERATION typed parameter

The parameter is reported by the virDomainGetJobStats API and the
VIR_DOMAIN_EVENT_ID_JOB_COMPLETED event and can be used to identify the
operation (migration, snapshot, ...) to which the reported statistics belong.

https://bugzilla.redhat.com/show_bug.cgi?id=1441563

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 include/libvirt/libvirt-domain.h | 25 +++++++++++++++++++++++++
 tools/virsh-domain.c             | 29 +++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)

diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index 501996bc8..c9e96a6c9 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -3117,6 +3117,31 @@ int virDomainGetJobStats(virDomainPtr domain,
                          unsigned int flags);
 int virDomainAbortJob(virDomainPtr dom);
 
+typedef enum {
+    VIR_DOMAIN_JOB_OPERATION_UNKNOWN = 0,
+    VIR_DOMAIN_JOB_OPERATION_START = 1,
+    VIR_DOMAIN_JOB_OPERATION_SAVE = 2,
+    VIR_DOMAIN_JOB_OPERATION_RESTORE = 3,
+    VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN = 4,
+    VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT = 5,
+    VIR_DOMAIN_JOB_OPERATION_SNAPSHOT = 6,
+    VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT = 7,
+    VIR_DOMAIN_JOB_OPERATION_DUMP = 8,
+
+# ifdef VIR_ENUM_SENTINELS
+    VIR_DOMAIN_JOB_OPERATION_LAST
+# endif
+} virDomainJobOperation;
+
+/**
+ * VIR_DOMAIN_JOB_OPERATION:
+ *
+ * virDomainGetJobStats field: the operation which started the job as
+ * VIR_TYPED_PARAM_INT. The values correspond to the items in
+ * virDomainJobOperation enum.
+ */
+# define VIR_DOMAIN_JOB_OPERATION "operation"
+
 /**
  * VIR_DOMAIN_JOB_TIME_ELAPSED:
  *
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index db8accfe4..0d19d0e01 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -5658,6 +5658,26 @@ virshDomainJobToString(int type)
     return str ? _(str) : _("unknown");
 }
 
+VIR_ENUM_DECL(virshDomainJobOperation);
+VIR_ENUM_IMPL(virshDomainJobOperation,
+              VIR_DOMAIN_JOB_OPERATION_LAST,
+              N_("Unknown"),
+              N_("Start"),
+              N_("Save"),
+              N_("Restore"),
+              N_("Incoming migration"),
+              N_("Outgoing migration"),
+              N_("Snapshot"),
+              N_("Snapshot revert"),
+              N_("Dump"))
+
+static const char *
+virshDomainJobOperationToString(int op)
+{
+    const char *str = virshDomainJobOperationTypeToString(op);
+    return str ? _(str) : _("unknown");
+}
+
 static bool
 cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd)
 {
@@ -5671,6 +5691,7 @@ cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd)
     unsigned long long value;
     unsigned int flags = 0;
     int ivalue;
+    int op;
     int rc;
 
     if (!(dom = virshCommandOptDomain(ctl, cmd, NULL)))
@@ -5740,6 +5761,14 @@ cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd)
         goto cleanup;
     }
 
+    op = VIR_DOMAIN_JOB_OPERATION_UNKNOWN;
+    if ((rc = virTypedParamsGetInt(params, nparams,
+                                   VIR_DOMAIN_JOB_OPERATION, &op)) < 0)
+        goto save_error;
+
+    vshPrint(ctl, "%-17s %-12s\n", _("Operation:"),
+             virshDomainJobOperationToString(op));
+
     vshPrint(ctl, "%-17s %-12llu ms\n", _("Time elapsed:"),
              info.timeElapsed);
     if ((rc = virTypedParamsGetULLong(params, nparams,
                                       VIR_DOMAIN_JOB_TIME_ELAPSED_NET,
-- 
2.12.2
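The same field is also delivered with the VIR_DOMAIN_EVENT_ID_JOB_COMPLETED
event. A sketch of an event consumer (not part of the series; it assumes an
event loop such as virEventRegisterDefaultImpl() is already running and conn
is an open connection):

#include <stdio.h>
#include <libvirt/libvirt.h>

/* Callback matching virConnectDomainEventJobCompletedCallback: the
 * completed-job statistics arrive as typed parameters. */
static void
jobCompletedCb(virConnectPtr conn, virDomainPtr dom,
               virTypedParameterPtr params, int nparams, void *opaque)
{
    int op = VIR_DOMAIN_JOB_OPERATION_UNKNOWN;

    if (virTypedParamsGetInt(params, nparams,
                             VIR_DOMAIN_JOB_OPERATION, &op) > 0)
        printf("%s: completed job was started by operation %d\n",
               virDomainGetName(dom), op);
}

/* Register the callback for all domains on the connection. */
int
registerJobCompleted(virConnectPtr conn)
{
    return virConnectDomainEventRegisterAny(
        conn, NULL, VIR_DOMAIN_EVENT_ID_JOB_COMPLETED,
        VIR_DOMAIN_EVENT_CALLBACK(jobCompletedCb), NULL, NULL);
}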

[libvirt] [PATCH 2/2] qemu: Report VIR_DOMAIN_JOB_OPERATION

https://bugzilla.redhat.com/show_bug.cgi?id=1441563

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_domain.c    | 16 +++++++++++++---
 src/qemu/qemu_domain.h    |  4 +++-
 src/qemu/qemu_driver.c    | 32 ++++++++++++++++++++++----------
 src/qemu/qemu_migration.c | 19 ++++++++++++-------
 src/qemu/qemu_process.c   |  6 ++++--
 src/qemu/qemu_process.h   |  3 ++-
 6 files changed, 56 insertions(+), 24 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 00b0b4ac4..6824af565 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -436,6 +436,11 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
     int maxpar = 0;
     int npar = 0;
 
+    if (virTypedParamsAddInt(&par, &npar, &maxpar,
+                             VIR_DOMAIN_JOB_OPERATION,
+                             jobInfo->operation) < 0)
+        goto error;
+
     if (virTypedParamsAddULLong(&par, &npar, &maxpar,
                                 VIR_DOMAIN_JOB_TIME_ELAPSED,
                                 jobInfo->timeElapsed) < 0)
@@ -3736,13 +3741,18 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
-                               qemuDomainAsyncJob asyncJob)
+                               qemuDomainAsyncJob asyncJob,
+                               virDomainJobOperation operation)
 {
+    qemuDomainObjPrivatePtr priv;
+
     if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
                                       asyncJob) < 0)
         return -1;
-    else
-        return 0;
+
+    priv = obj->privateData;
+    priv->job.current->operation = operation;
+    return 0;
 }
 
 int
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 036ae1293..aebd91ad3 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -103,6 +103,7 @@ typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
 typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
 struct _qemuDomainJobInfo {
     virDomainJobType type;
+    virDomainJobOperation operation;
     unsigned long long started; /* When the async job started */
     unsigned long long stopped; /* When the domain's CPUs were stopped */
     unsigned long long sent; /* When the source sent status info to the
@@ -433,7 +434,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
-                               qemuDomainAsyncJob asyncJob)
+                               qemuDomainAsyncJob asyncJob,
+                               virDomainJobOperation operation)
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e39de625d..8d49e62cf 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -268,7 +268,8 @@ qemuAutostartDomain(virDomainObjPtr vm,
     virResetLastError();
 
     if (vm->autostart && !virDomainObjIsActive(vm)) {
-        if (qemuProcessBeginJob(data->driver, vm) < 0) {
+        if (qemuProcessBeginJob(data->driver, vm,
+                                VIR_DOMAIN_JOB_OPERATION_START) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Failed to start job on VM '%s': %s"),
                            vm->def->name, virGetLastErrorMessage());
@@ -1761,7 +1762,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virObjectRef(vm);
     def = NULL;
 
-    if (qemuProcessBeginJob(driver, vm) < 0) {
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0) {
         qemuDomainRemoveInactive(driver, vm);
         goto cleanup;
     }
@@ -3147,7 +3148,8 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     if (!qemuMigrationIsAllowed(driver, vm, false, 0))
         goto cleanup;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
+                                   VIR_DOMAIN_JOB_OPERATION_SAVE) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -3685,7 +3687,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
         goto cleanup;
 
     if (qemuDomainObjBeginAsyncJob(driver, vm,
-                                   QEMU_ASYNC_JOB_DUMP) < 0)
+                                   QEMU_ASYNC_JOB_DUMP,
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -3907,7 +3910,8 @@ processWatchdogEvent(virQEMUDriverPtr driver,
     switch (action) {
     case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
         if (qemuDomainObjBeginAsyncJob(driver, vm,
-                                       QEMU_ASYNC_JOB_DUMP) < 0) {
+                                       QEMU_ASYNC_JOB_DUMP,
+                                       VIR_DOMAIN_JOB_OPERATION_DUMP) < 0) {
             goto cleanup;
         }
 
@@ -3994,7 +3998,8 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     bool removeInactive = false;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
         goto cleanup;
 
     if (!virDomainObjIsActive(vm)) {
@@ -6479,7 +6484,7 @@ qemuDomainRestoreFlags(virConnectPtr conn,
         priv->hookRun = true;
     }
 
-    if (qemuProcessBeginJob(driver, vm) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE) < 0)
         goto cleanup;
 
     ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path,
@@ -6899,6 +6904,7 @@ qemuDomainObjStart(virConnectPtr conn,
     bool bypass_cache = (flags & VIR_DOMAIN_START_BYPASS_CACHE) != 0;
     bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0;
     unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
 
     start_flags |= start_paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
     start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0;
@@ -6922,6 +6928,9 @@ qemuDomainObjStart(virConnectPtr conn,
             }
             vm->hasManagedSave = false;
         } else {
+            virDomainJobOperation op = priv->job.current->operation;
+            priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE;
+
             ret = qemuDomainObjRestore(conn, driver, vm, managed_save,
                                        start_paused, bypass_cache, asyncJob);
 
@@ -6938,6 +6947,7 @@ qemuDomainObjStart(virConnectPtr conn,
                 goto cleanup;
             } else {
                 VIR_WARN("Ignoring incomplete managed state %s", managed_save);
+                priv->job.current->operation = op;
             }
         }
     }
@@ -6987,7 +6997,7 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
     if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    if (qemuProcessBeginJob(driver, vm) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0)
         goto cleanup;
 
     if (virDomainObjIsActive(vm)) {
@@ -14552,7 +14562,8 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
      * a regular job, so we need to set the job mask to disallow query as
      * 'savevm' blocks the monitor. External snapshot will then modify the
      * job mask appropriately. */
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
+                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT) < 0)
         goto cleanup;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
@@ -15142,7 +15153,8 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
         goto cleanup;
     }
 
-    if (qemuProcessBeginJob(driver, vm) < 0)
+    if (qemuProcessBeginJob(driver, vm,
+                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT) < 0)
         goto cleanup;
 
     if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 09adb0484..7e97368c8 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5607,18 +5607,23 @@ qemuMigrationJobStart(virQEMUDriverPtr driver,
                       qemuDomainAsyncJob job)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-
-    if (qemuDomainObjBeginAsyncJob(driver, vm, job) < 0)
-        return -1;
+    virDomainJobOperation op;
+    unsigned long long mask;
 
     if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
-        qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
+        mask = QEMU_JOB_NONE;
     } else {
-        qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
-                                          JOB_MASK(QEMU_JOB_SUSPEND) |
-                                          JOB_MASK(QEMU_JOB_MIGRATION_OP)));
+        op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
+        mask = QEMU_JOB_DEFAULT_MASK |
+               JOB_MASK(QEMU_JOB_SUSPEND) |
+               JOB_MASK(QEMU_JOB_MIGRATION_OP);
     }
 
+    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op) < 0)
+        return -1;
+
+    qemuDomainObjSetAsyncJobMask(vm, mask);
     priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED;
 
     return 0;
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 53170d732..acfffdb8e 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -4143,11 +4143,13 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
  */
 int
 qemuProcessBeginJob(virQEMUDriverPtr driver,
-                    virDomainObjPtr vm)
+                    virDomainObjPtr vm,
+                    virDomainJobOperation operation)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
-    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
+                                   operation) < 0)
         return -1;
 
     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 21f3b0cca..830d8cef8 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -59,7 +59,8 @@ qemuProcessIncomingDefPtr qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
 void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc);
 
 int qemuProcessBeginJob(virQEMUDriverPtr driver,
-                        virDomainObjPtr vm);
+                        virDomainObjPtr vm,
+                        virDomainJobOperation operation);
 void qemuProcessEndJob(virQEMUDriverPtr driver,
                        virDomainObjPtr vm);
 
-- 
2.12.2
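With both patches applied, the qemu driver fills in the operation and the new
"Operation" line added to virsh domjobinfo in patch 1 becomes visible. An
illustrative excerpt (all values made up) for an outgoing migration might look
like:

Job type:         Unbounded
Operation:        Outgoing migration
Time elapsed:     1234         ms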

On Wed, Apr 26, 2017 at 17:46:20 +0200, Jiri Denemark wrote:
This commit message is quite sparse. You could at least mention that all
async job entry points need to be annotated.

ACK series.

On Thu, Apr 27, 2017 at 14:07:17 +0200, Peter Krempa wrote:
> On Wed, Apr 26, 2017 at 17:46:20 +0200, Jiri Denemark wrote:
> This commit message is quite sparse. You could at least mention that all
> async job entry points need to be annotated.
> 
> ACK series.
I made the message a bit more verbose and pushed. Thanks.

Jirka