[libvirt] [PATCH 0/5] Misc migration cleanups

Jiri Denemark (5):
  qemu: Reorder migration status enum
  qemu: Rename qemuMonitorMigrationStatus struct
  qemu: Create a proper type for migration status enum
  qemu: Report more migration statistics
  qemu: Refactor qemuMigrationFinish

 include/libvirt/libvirt-domain.h |  19 ++
 src/qemu/qemu_domain.c           |  92 +++++----
 src/qemu/qemu_domain.h           |   2 +-
 src/qemu/qemu_driver.c           |   2 +-
 src/qemu/qemu_migration.c        | 368 ++++++++++++++++++++-------------------
 src/qemu/qemu_monitor.c          |  14 +-
 src/qemu/qemu_monitor.h          |  20 ++-
 src/qemu/qemu_monitor_json.c     |  96 +++++-----
 src/qemu/qemu_monitor_json.h     |   4 +-
 src/qemu/qemu_monitor_text.c     |  40 ++---
 src/qemu/qemu_monitor_text.h     |   4 +-
 src/qemu/qemu_process.c          |   2 +-
 tests/qemumonitorjsontest.c      |  22 +--
 tools/virsh-domain.c             |  16 ++
 14 files changed, 388 insertions(+), 313 deletions(-)

-- 
2.7.0

A migration is in "setup" state after it was "inactive" and before it
becomes "active". Let's reflect this in our migration status enum.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_monitor.c | 6 ++++--
 src/qemu/qemu_monitor.h | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 4906faa..249a25e 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -160,8 +160,10 @@ VIR_ONCE_GLOBAL_INIT(qemuMonitor)
 
 VIR_ENUM_IMPL(qemuMonitorMigrationStatus,
               QEMU_MONITOR_MIGRATION_STATUS_LAST,
-              "inactive", "active", "completed", "failed", "cancelling",
-              "cancelled", "setup")
+              "inactive", "setup",
+              "active",
+              "completed", "failed",
+              "cancelling", "cancelled")
 
 VIR_ENUM_IMPL(qemuMonitorMigrationCaps,
               QEMU_MONITOR_MIGRATION_CAPS_LAST,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 6be0108..84e51cd 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -457,12 +457,12 @@ int qemuMonitorSetMigrationCacheSize(qemuMonitorPtr mon,
 
 enum {
     QEMU_MONITOR_MIGRATION_STATUS_INACTIVE,
+    QEMU_MONITOR_MIGRATION_STATUS_SETUP,
     QEMU_MONITOR_MIGRATION_STATUS_ACTIVE,
     QEMU_MONITOR_MIGRATION_STATUS_COMPLETED,
     QEMU_MONITOR_MIGRATION_STATUS_ERROR,
     QEMU_MONITOR_MIGRATION_STATUS_CANCELLING,
     QEMU_MONITOR_MIGRATION_STATUS_CANCELLED,
-    QEMU_MONITOR_MIGRATION_STATUS_SETUP,
 
     QEMU_MONITOR_MIGRATION_STATUS_LAST
 };
-- 
2.7.0

On Fri, Jan 08, 2016 at 10:49:34 +0100, Jiri Denemark wrote:
A migration is in "setup" state after it was "inactive" and before it becomes "active". Let's reflect this in our migration status enum.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
 src/qemu/qemu_monitor.c | 6 ++++--
 src/qemu/qemu_monitor.h | 2 +-
 2 files changed, 5 insertions(+), 3 deletions(-)
ACK

The structure actually contains migration statistics rather than just the status as the name suggests. Renaming it as qemuMonitorMigrationStats removes the confusion. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_domain.c | 84 ++++++++++++++++++------------------ src/qemu/qemu_domain.h | 2 +- src/qemu/qemu_driver.c | 2 +- src/qemu/qemu_migration.c | 100 +++++++++++++++++++++---------------------- src/qemu/qemu_monitor.c | 8 ++-- src/qemu/qemu_monitor.h | 10 ++--- src/qemu/qemu_monitor_json.c | 74 ++++++++++++++++---------------- src/qemu/qemu_monitor_json.h | 4 +- src/qemu/qemu_monitor_text.c | 40 ++++++++--------- src/qemu/qemu_monitor_text.h | 4 +- src/qemu/qemu_process.c | 2 +- tests/qemumonitorjsontest.c | 22 +++++----- 12 files changed, 176 insertions(+), 176 deletions(-) diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 73fc79d..fb50c91 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -262,8 +262,8 @@ qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) return 0; } - jobInfo->status.downtime = now - jobInfo->stopped; - jobInfo->status.downtime_set = true; + jobInfo->stats.downtime = now - jobInfo->stopped; + jobInfo->stats.downtime_set = true; return 0; } @@ -275,13 +275,13 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, info->timeElapsed = jobInfo->timeElapsed; info->timeRemaining = jobInfo->timeRemaining; - info->memTotal = jobInfo->status.ram_total; - info->memRemaining = jobInfo->status.ram_remaining; - info->memProcessed = jobInfo->status.ram_transferred; + info->memTotal = jobInfo->stats.ram_total; + info->memRemaining = jobInfo->stats.ram_remaining; + info->memProcessed = jobInfo->stats.ram_transferred; - info->fileTotal = jobInfo->status.disk_total; - info->fileRemaining = jobInfo->status.disk_remaining; - info->fileProcessed = jobInfo->status.disk_transferred; + info->fileTotal = jobInfo->stats.disk_total; + info->fileRemaining = jobInfo->stats.disk_remaining; + 
info->fileProcessed = jobInfo->stats.disk_transferred; info->dataTotal = info->memTotal + info->fileTotal; info->dataRemaining = info->memRemaining + info->fileRemaining; @@ -296,7 +296,7 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, virTypedParameterPtr *params, int *nparams) { - qemuMonitorMigrationStatus *status = &jobInfo->status; + qemuMonitorMigrationStats *stats = &jobInfo->stats; virTypedParameterPtr par = NULL; int maxpar = 0; int npar = 0; @@ -319,103 +319,103 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, jobInfo->timeRemaining) < 0) goto error; - if (status->downtime_set && + if (stats->downtime_set && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DOWNTIME, - status->downtime) < 0) + stats->downtime) < 0) goto error; - if (status->downtime_set && + if (stats->downtime_set && jobInfo->timeDeltaSet && - status->downtime > jobInfo->timeDelta && + stats->downtime > jobInfo->timeDelta && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DOWNTIME_NET, - status->downtime - jobInfo->timeDelta) < 0) + stats->downtime - jobInfo->timeDelta) < 0) goto error; - if (status->setup_time_set && + if (stats->setup_time_set && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_SETUP_TIME, - status->setup_time) < 0) + stats->setup_time) < 0) goto error; if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DATA_TOTAL, - status->ram_total + - status->disk_total) < 0 || + stats->ram_total + + stats->disk_total) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DATA_PROCESSED, - status->ram_transferred + - status->disk_transferred) < 0 || + stats->ram_transferred + + stats->disk_transferred) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DATA_REMAINING, - status->ram_remaining + - status->disk_remaining) < 0) + stats->ram_remaining + + stats->disk_remaining) < 0) goto error; if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_TOTAL, - 
status->ram_total) < 0 || + stats->ram_total) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_PROCESSED, - status->ram_transferred) < 0 || + stats->ram_transferred) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_REMAINING, - status->ram_remaining) < 0) + stats->ram_remaining) < 0) goto error; - if (status->ram_bps && + if (stats->ram_bps && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_BPS, - status->ram_bps) < 0) + stats->ram_bps) < 0) goto error; - if (status->ram_duplicate_set) { + if (stats->ram_duplicate_set) { if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_CONSTANT, - status->ram_duplicate) < 0 || + stats->ram_duplicate) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_NORMAL, - status->ram_normal) < 0 || + stats->ram_normal) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES, - status->ram_normal_bytes) < 0) + stats->ram_normal_bytes) < 0) goto error; } if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DISK_TOTAL, - status->disk_total) < 0 || + stats->disk_total) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DISK_PROCESSED, - status->disk_transferred) < 0 || + stats->disk_transferred) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DISK_REMAINING, - status->disk_remaining) < 0) + stats->disk_remaining) < 0) goto error; - if (status->disk_bps && + if (stats->disk_bps && virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DISK_BPS, - status->disk_bps) < 0) + stats->disk_bps) < 0) goto error; - if (status->xbzrle_set) { + if (stats->xbzrle_set) { if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_COMPRESSION_CACHE, - status->xbzrle_cache_size) < 0 || + stats->xbzrle_cache_size) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_COMPRESSION_BYTES, - status->xbzrle_bytes) < 0 || + stats->xbzrle_bytes) < 0 
|| virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_COMPRESSION_PAGES, - status->xbzrle_pages) < 0 || + stats->xbzrle_pages) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES, - status->xbzrle_cache_miss) < 0 || + stats->xbzrle_cache_miss) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW, - status->xbzrle_overflow) < 0) + stats->xbzrle_overflow) < 0) goto error; } diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h index cff48d7..82495dc 100644 --- a/src/qemu/qemu_domain.h +++ b/src/qemu/qemu_domain.h @@ -116,7 +116,7 @@ struct _qemuDomainJobInfo { destination. */ bool timeDeltaSet; /* Raw values from QEMU */ - qemuMonitorMigrationStatus status; + qemuMonitorMigrationStats stats; }; struct qemuDomainJobObj { diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 39c2c05..89eae2a 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -13042,7 +13042,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, fetch = false; /* Do not ask QEMU if migration is not even running yet */ - if (!priv->job.current || !priv->job.current->status.status) + if (!priv->job.current || !priv->job.current->stats.status) fetch = false; if (fetch && diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 6be11b4..855e0e0 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -699,7 +699,7 @@ static void qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf, qemuDomainJobInfoPtr jobInfo) { - qemuMonitorMigrationStatus *status = &jobInfo->status; + qemuMonitorMigrationStats *stats = &jobInfo->stats; virBufferAddLit(buf, "<statistics>\n"); virBufferAdjustIndent(buf, 2); @@ -716,69 +716,69 @@ qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf, virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_TIME_REMAINING, jobInfo->timeRemaining); - if (status->downtime_set) + if (stats->downtime_set) 
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DOWNTIME, - status->downtime); - if (status->setup_time_set) + stats->downtime); + if (stats->setup_time_set) virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_SETUP_TIME, - status->setup_time); + stats->setup_time); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_TOTAL, - status->ram_total); + stats->ram_total); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_PROCESSED, - status->ram_transferred); + stats->ram_transferred); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_REMAINING, - status->ram_remaining); + stats->ram_remaining); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_BPS, - status->ram_bps); + stats->ram_bps); - if (status->ram_duplicate_set) { + if (stats->ram_duplicate_set) { virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_CONSTANT, - status->ram_duplicate); + stats->ram_duplicate); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_NORMAL, - status->ram_normal); + stats->ram_normal); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES, - status->ram_normal_bytes); + stats->ram_normal_bytes); } virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DISK_TOTAL, - status->disk_total); + stats->disk_total); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DISK_PROCESSED, - status->disk_transferred); + stats->disk_transferred); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DISK_REMAINING, - status->disk_remaining); + stats->disk_remaining); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DISK_BPS, - status->disk_bps); + stats->disk_bps); - if (status->xbzrle_set) { + if (stats->xbzrle_set) { virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_COMPRESSION_CACHE, - status->xbzrle_cache_size); + stats->xbzrle_cache_size); virBufferAsprintf(buf, 
"<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_COMPRESSION_BYTES, - status->xbzrle_bytes); + stats->xbzrle_bytes); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_COMPRESSION_PAGES, - status->xbzrle_pages); + stats->xbzrle_pages); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES, - status->xbzrle_cache_miss); + stats->xbzrle_cache_miss); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW, - status->xbzrle_overflow); + stats->xbzrle_overflow); } virBufferAdjustIndent(buf, -2); @@ -1053,7 +1053,7 @@ static qemuDomainJobInfoPtr qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) { qemuDomainJobInfoPtr jobInfo = NULL; - qemuMonitorMigrationStatus *status; + qemuMonitorMigrationStats *stats; xmlNodePtr save_ctxt = ctxt->node; if (!(ctxt->node = virXPathNode("./statistics", ctxt))) @@ -1062,7 +1062,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) if (VIR_ALLOC(jobInfo) < 0) goto cleanup; - status = &jobInfo->status; + stats = &jobInfo->stats; jobInfo->type = VIR_DOMAIN_JOB_COMPLETED; virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started); @@ -1077,49 +1077,49 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) ctxt, &jobInfo->timeRemaining); if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])", - ctxt, &status->downtime) == 0) - status->downtime_set = true; + ctxt, &stats->downtime) == 0) + stats->downtime_set = true; if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_SETUP_TIME "[1])", - ctxt, &status->setup_time) == 0) - status->setup_time_set = true; + ctxt, &stats->setup_time) == 0) + stats->setup_time_set = true; virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_TOTAL "[1])", - ctxt, &status->ram_total); + ctxt, &stats->ram_total); virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_PROCESSED "[1])", - ctxt, &status->ram_transferred); + ctxt, &stats->ram_transferred); virXPathULongLong("string(./" 
VIR_DOMAIN_JOB_MEMORY_REMAINING "[1])", - ctxt, &status->ram_remaining); + ctxt, &stats->ram_remaining); virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_BPS "[1])", - ctxt, &status->ram_bps); + ctxt, &stats->ram_bps); if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_CONSTANT "[1])", - ctxt, &status->ram_duplicate) == 0) - status->ram_duplicate_set = true; + ctxt, &stats->ram_duplicate) == 0) + stats->ram_duplicate_set = true; virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL "[1])", - ctxt, &status->ram_normal); + ctxt, &stats->ram_normal); virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES "[1])", - ctxt, &status->ram_normal_bytes); + ctxt, &stats->ram_normal_bytes); virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_TOTAL "[1])", - ctxt, &status->disk_total); + ctxt, &stats->disk_total); virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_PROCESSED "[1])", - ctxt, &status->disk_transferred); + ctxt, &stats->disk_transferred); virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_REMAINING "[1])", - ctxt, &status->disk_remaining); + ctxt, &stats->disk_remaining); virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_BPS "[1])", - ctxt, &status->disk_bps); + ctxt, &stats->disk_bps); if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_CACHE "[1])", - ctxt, &status->xbzrle_cache_size) == 0) - status->xbzrle_set = true; + ctxt, &stats->xbzrle_cache_size) == 0) + stats->xbzrle_set = true; virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_BYTES "[1])", - ctxt, &status->xbzrle_bytes); + ctxt, &stats->xbzrle_bytes); virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_PAGES "[1])", - ctxt, &status->xbzrle_pages); + ctxt, &stats->xbzrle_pages); virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES "[1])", - ctxt, &status->xbzrle_cache_miss); + ctxt, &stats->xbzrle_cache_miss); virXPathULongLong("string(./" VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW "[1])", - ctxt, &status->xbzrle_overflow); + ctxt, 
&stats->xbzrle_overflow); cleanup: ctxt->node = save_ctxt; @@ -2518,7 +2518,7 @@ qemuMigrationWaitForSpice(virDomainObjPtr vm) static void qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) { - switch (jobInfo->status.status) { + switch (jobInfo->stats.status) { case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: jobInfo->type = VIR_DOMAIN_JOB_COMPLETED; break; @@ -2555,8 +2555,8 @@ qemuMigrationFetchJobStatus(virQEMUDriverPtr driver, if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) return -1; - memset(&jobInfo->status, 0, sizeof(jobInfo->status)); - rv = qemuMonitorGetMigrationStatus(priv->mon, &jobInfo->status); + memset(&jobInfo->stats, 0, sizeof(jobInfo->stats)); + rv = qemuMonitorGetMigrationStats(priv->mon, &jobInfo->stats); if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0) return -1; diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index 249a25e..6fd08b6 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -2103,15 +2103,15 @@ qemuMonitorSetMigrationCacheSize(qemuMonitorPtr mon, int -qemuMonitorGetMigrationStatus(qemuMonitorPtr mon, - qemuMonitorMigrationStatusPtr status) +qemuMonitorGetMigrationStats(qemuMonitorPtr mon, + qemuMonitorMigrationStatsPtr stats) { QEMU_CHECK_MONITOR(mon); if (mon->json) - return qemuMonitorJSONGetMigrationStatus(mon, status); + return qemuMonitorJSONGetMigrationStats(mon, stats); else - return qemuMonitorTextGetMigrationStatus(mon, status); + return qemuMonitorTextGetMigrationStats(mon, stats); } diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 84e51cd..27ff169 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -469,9 +469,9 @@ enum { VIR_ENUM_DECL(qemuMonitorMigrationStatus) -typedef struct _qemuMonitorMigrationStatus qemuMonitorMigrationStatus; -typedef qemuMonitorMigrationStatus *qemuMonitorMigrationStatusPtr; -struct _qemuMonitorMigrationStatus { +typedef struct _qemuMonitorMigrationStats qemuMonitorMigrationStats; +typedef 
qemuMonitorMigrationStats *qemuMonitorMigrationStatsPtr; +struct _qemuMonitorMigrationStats { int status; unsigned long long total_time; /* total or expected depending on status */ @@ -507,8 +507,8 @@ struct _qemuMonitorMigrationStatus { unsigned long long xbzrle_overflow; }; -int qemuMonitorGetMigrationStatus(qemuMonitorPtr mon, - qemuMonitorMigrationStatusPtr status); +int qemuMonitorGetMigrationStats(qemuMonitorPtr mon, + qemuMonitorMigrationStatsPtr stats); typedef enum { QEMU_MONITOR_MIGRATION_CAPS_XBZRLE, diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index d4b6514..2d3b358 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2421,8 +2421,8 @@ qemuMonitorJSONSetMigrationCacheSize(qemuMonitorPtr mon, static int -qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, - qemuMonitorMigrationStatusPtr status) +qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, + qemuMonitorMigrationStatsPtr stats) { virJSONValuePtr ret; const char *statusstr; @@ -2441,32 +2441,32 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, return -1; } - status->status = qemuMonitorMigrationStatusTypeFromString(statusstr); - if (status->status < 0) { + stats->status = qemuMonitorMigrationStatusTypeFromString(statusstr); + if (stats->status < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("unexpected migration status in %s"), statusstr); return -1; } ignore_value(virJSONValueObjectGetNumberUlong(ret, "total-time", - &status->total_time)); - if (status->status == QEMU_MONITOR_MIGRATION_STATUS_COMPLETED) { + &stats->total_time)); + if (stats->status == QEMU_MONITOR_MIGRATION_STATUS_COMPLETED) { rc = virJSONValueObjectGetNumberUlong(ret, "downtime", - &status->downtime); + &stats->downtime); } else { rc = virJSONValueObjectGetNumberUlong(ret, "expected-downtime", - &status->downtime); + &stats->downtime); } if (rc == 0) - status->downtime_set = true; + stats->downtime_set = true; if 
(virJSONValueObjectGetNumberUlong(ret, "setup-time", - &status->setup_time) == 0) - status->setup_time_set = true; + &stats->setup_time) == 0) + stats->setup_time_set = true; - if (status->status == QEMU_MONITOR_MIGRATION_STATUS_ACTIVE || - status->status == QEMU_MONITOR_MIGRATION_STATUS_CANCELLING || - status->status == QEMU_MONITOR_MIGRATION_STATUS_COMPLETED) { + if (stats->status == QEMU_MONITOR_MIGRATION_STATUS_ACTIVE || + stats->status == QEMU_MONITOR_MIGRATION_STATUS_CANCELLING || + stats->status == QEMU_MONITOR_MIGRATION_STATUS_COMPLETED) { virJSONValuePtr ram = virJSONValueObjectGetObject(ret, "ram"); if (!ram) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -2475,21 +2475,21 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } if (virJSONValueObjectGetNumberUlong(ram, "transferred", - &status->ram_transferred) < 0) { + &stats->ram_transferred) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("migration was active, but RAM 'transferred' " "data was missing")); return -1; } if (virJSONValueObjectGetNumberUlong(ram, "remaining", - &status->ram_remaining) < 0) { + &stats->ram_remaining) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("migration was active, but RAM 'remaining' " "data was missing")); return -1; } if (virJSONValueObjectGetNumberUlong(ram, "total", - &status->ram_total) < 0) { + &stats->ram_total) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("migration was active, but RAM 'total' " "data was missing")); @@ -2499,21 +2499,21 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, if (virJSONValueObjectGetNumberDouble(ram, "mbps", &mbps) == 0 && mbps > 0) { /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */ - status->ram_bps = mbps * (1000 * 1000 / 8); + stats->ram_bps = mbps * (1000 * 1000 / 8); } if (virJSONValueObjectGetNumberUlong(ram, "duplicate", - &status->ram_duplicate) == 0) - status->ram_duplicate_set = true; + &stats->ram_duplicate) == 0) + stats->ram_duplicate_set = true; 
ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal", - &status->ram_normal)); + &stats->ram_normal)); ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes", - &status->ram_normal_bytes)); + &stats->ram_normal_bytes)); virJSONValuePtr disk = virJSONValueObjectGetObject(ret, "disk"); if (disk) { rc = virJSONValueObjectGetNumberUlong(disk, "transferred", - &status->disk_transferred); + &stats->disk_transferred); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("disk migration was active, but " @@ -2522,7 +2522,7 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } rc = virJSONValueObjectGetNumberUlong(disk, "remaining", - &status->disk_remaining); + &stats->disk_remaining); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("disk migration was active, but 'remaining' " @@ -2531,7 +2531,7 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } rc = virJSONValueObjectGetNumberUlong(disk, "total", - &status->disk_total); + &stats->disk_total); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("disk migration was active, but 'total' " @@ -2542,15 +2542,15 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, if (virJSONValueObjectGetNumberDouble(disk, "mbps", &mbps) == 0 && mbps > 0) { /* mpbs from QEMU reports Mbits/s (M as in 10^6 not Mi as 2^20) */ - status->disk_bps = mbps * (1000 * 1000 / 8); + stats->disk_bps = mbps * (1000 * 1000 / 8); } } virJSONValuePtr comp = virJSONValueObjectGetObject(ret, "xbzrle-cache"); if (comp) { - status->xbzrle_set = true; + stats->xbzrle_set = true; rc = virJSONValueObjectGetNumberUlong(comp, "cache-size", - &status->xbzrle_cache_size); + &stats->xbzrle_cache_size); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("XBZRLE is active, but 'cache-size' data " @@ -2559,7 +2559,7 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } rc = virJSONValueObjectGetNumberUlong(comp, "bytes", - &status->xbzrle_bytes); + 
&stats->xbzrle_bytes); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("XBZRLE is active, but 'bytes' data " @@ -2568,7 +2568,7 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } rc = virJSONValueObjectGetNumberUlong(comp, "pages", - &status->xbzrle_pages); + &stats->xbzrle_pages); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("XBZRLE is active, but 'pages' data " @@ -2577,7 +2577,7 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } rc = virJSONValueObjectGetNumberUlong(comp, "cache-miss", - &status->xbzrle_cache_miss); + &stats->xbzrle_cache_miss); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("XBZRLE is active, but 'cache-miss' data " @@ -2586,7 +2586,7 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } rc = virJSONValueObjectGetNumberUlong(comp, "overflow", - &status->xbzrle_overflow); + &stats->xbzrle_overflow); if (rc < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("XBZRLE is active, but 'overflow' data " @@ -2600,15 +2600,15 @@ qemuMonitorJSONGetMigrationStatusReply(virJSONValuePtr reply, } -int qemuMonitorJSONGetMigrationStatus(qemuMonitorPtr mon, - qemuMonitorMigrationStatusPtr status) +int qemuMonitorJSONGetMigrationStats(qemuMonitorPtr mon, + qemuMonitorMigrationStatsPtr stats) { int ret; virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("query-migrate", NULL); virJSONValuePtr reply = NULL; - memset(status, 0, sizeof(*status)); + memset(stats, 0, sizeof(*stats)); if (!cmd) return -1; @@ -2619,11 +2619,11 @@ int qemuMonitorJSONGetMigrationStatus(qemuMonitorPtr mon, ret = qemuMonitorJSONCheckError(cmd, reply); if (ret == 0 && - qemuMonitorJSONGetMigrationStatusReply(reply, status) < 0) + qemuMonitorJSONGetMigrationStatsReply(reply, stats) < 0) ret = -1; if (ret < 0) - memset(status, 0, sizeof(*status)); + memset(stats, 0, sizeof(*stats)); virJSONValueFree(cmd); virJSONValueFree(reply); return ret; diff --git a/src/qemu/qemu_monitor_json.h 
b/src/qemu/qemu_monitor_json.h index 374c8ea..2c27c6f 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -123,8 +123,8 @@ int qemuMonitorJSONGetMigrationCacheSize(qemuMonitorPtr mon, int qemuMonitorJSONSetMigrationCacheSize(qemuMonitorPtr mon, unsigned long long cacheSize); -int qemuMonitorJSONGetMigrationStatus(qemuMonitorPtr mon, - qemuMonitorMigrationStatusPtr status); +int qemuMonitorJSONGetMigrationStats(qemuMonitorPtr mon, + qemuMonitorMigrationStatsPtr stats); int qemuMonitorJSONGetMigrationCapabilities(qemuMonitorPtr mon, char ***capabilities); diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c index 665723d..316a942 100644 --- a/src/qemu/qemu_monitor_text.c +++ b/src/qemu/qemu_monitor_text.c @@ -1353,15 +1353,15 @@ int qemuMonitorTextSetMigrationDowntime(qemuMonitorPtr mon, #define MIGRATION_DISK_REMAINING_PREFIX "remaining disk: " #define MIGRATION_DISK_TOTAL_PREFIX "total disk: " -int qemuMonitorTextGetMigrationStatus(qemuMonitorPtr mon, - qemuMonitorMigrationStatusPtr status) +int qemuMonitorTextGetMigrationStats(qemuMonitorPtr mon, + qemuMonitorMigrationStatsPtr stats) { char *reply; char *tmp; char *end; int ret = -1; - memset(status, 0, sizeof(*status)); + memset(stats, 0, sizeof(*stats)); if (qemuMonitorHMPCommand(mon, "info migrate", &reply) < 0) return -1; @@ -1376,14 +1376,14 @@ int qemuMonitorTextGetMigrationStatus(qemuMonitorPtr mon, } *end = '\0'; - status->status = qemuMonitorMigrationStatusTypeFromString(tmp); - if (status->status < 0) { + stats->status = qemuMonitorMigrationStatusTypeFromString(tmp); + if (stats->status < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("unexpected migration status in %s"), reply); goto cleanup; } - if (status->status == QEMU_MONITOR_MIGRATION_STATUS_ACTIVE) { + if (stats->status == QEMU_MONITOR_MIGRATION_STATUS_ACTIVE) { tmp = end + 1; if (!(tmp = strstr(tmp, MIGRATION_TRANSFER_PREFIX))) @@ -1391,82 +1391,82 @@ int 
qemuMonitorTextGetMigrationStatus(qemuMonitorPtr mon, tmp += strlen(MIGRATION_TRANSFER_PREFIX); if (virStrToLong_ull(tmp, &end, 10, - &status->ram_transferred) < 0) { + &stats->ram_transferred) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("cannot parse migration data transferred " "statistic %s"), tmp); goto cleanup; } - status->ram_transferred *= 1024; + stats->ram_transferred *= 1024; tmp = end; if (!(tmp = strstr(tmp, MIGRATION_REMAINING_PREFIX))) goto done; tmp += strlen(MIGRATION_REMAINING_PREFIX); - if (virStrToLong_ull(tmp, &end, 10, &status->ram_remaining) < 0) { + if (virStrToLong_ull(tmp, &end, 10, &stats->ram_remaining) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("cannot parse migration data remaining " "statistic %s"), tmp); goto cleanup; } - status->ram_remaining *= 1024; + stats->ram_remaining *= 1024; tmp = end; if (!(tmp = strstr(tmp, MIGRATION_TOTAL_PREFIX))) goto done; tmp += strlen(MIGRATION_TOTAL_PREFIX); - if (virStrToLong_ull(tmp, &end, 10, &status->ram_total) < 0) { + if (virStrToLong_ull(tmp, &end, 10, &stats->ram_total) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("cannot parse migration data total " "statistic %s"), tmp); goto cleanup; } - status->ram_total *= 1024; + stats->ram_total *= 1024; tmp = end; /* - * Check for Optional Disk Migration status + * Check for Optional Disk Migration stats */ if (!(tmp = strstr(tmp, MIGRATION_DISK_TRANSFER_PREFIX))) goto done; tmp += strlen(MIGRATION_DISK_TRANSFER_PREFIX); if (virStrToLong_ull(tmp, &end, 10, - &status->disk_transferred) < 0) { + &stats->disk_transferred) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("cannot parse disk migration data " "transferred statistic %s"), tmp); goto cleanup; } - status->disk_transferred *= 1024; + stats->disk_transferred *= 1024; tmp = end; if (!(tmp = strstr(tmp, MIGRATION_DISK_REMAINING_PREFIX))) goto done; tmp += strlen(MIGRATION_DISK_REMAINING_PREFIX); - if (virStrToLong_ull(tmp, &end, 10, &status->disk_remaining) < 0) { + if 
(virStrToLong_ull(tmp, &end, 10, &stats->disk_remaining) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("cannot parse disk migration data remaining " "statistic %s"), tmp); goto cleanup; } - status->disk_remaining *= 1024; + stats->disk_remaining *= 1024; tmp = end; if (!(tmp = strstr(tmp, MIGRATION_DISK_TOTAL_PREFIX))) goto done; tmp += strlen(MIGRATION_DISK_TOTAL_PREFIX); - if (virStrToLong_ull(tmp, &end, 10, &status->disk_total) < 0) { + if (virStrToLong_ull(tmp, &end, 10, &stats->disk_total) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, _("cannot parse disk migration data total " "statistic %s"), tmp); goto cleanup; } - status->disk_total *= 1024; + stats->disk_total *= 1024; } } @@ -1476,7 +1476,7 @@ int qemuMonitorTextGetMigrationStatus(qemuMonitorPtr mon, cleanup: VIR_FREE(reply); if (ret < 0) - memset(status, 0, sizeof(*status)); + memset(stats, 0, sizeof(*stats)); return ret; } diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h index 53c503d..44a5330 100644 --- a/src/qemu/qemu_monitor_text.h +++ b/src/qemu/qemu_monitor_text.h @@ -103,8 +103,8 @@ int qemuMonitorTextSetMigrationSpeed(qemuMonitorPtr mon, int qemuMonitorTextSetMigrationDowntime(qemuMonitorPtr mon, unsigned long long downtime); -int qemuMonitorTextGetMigrationStatus(qemuMonitorPtr mon, - qemuMonitorMigrationStatusPtr status); +int qemuMonitorTextGetMigrationStats(qemuMonitorPtr mon, + qemuMonitorMigrationStatsPtr stats); int qemuMonitorTextMigrate(qemuMonitorPtr mon, unsigned int flags, diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index f274068..39f1210 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1507,7 +1507,7 @@ qemuProcessHandleMigrationStatus(qemuMonitorPtr mon ATTRIBUTE_UNUSED, goto cleanup; } - priv->job.current->status.status = status; + priv->job.current->stats.status = status; virDomainObjBroadcast(vm); cleanup: diff --git a/tests/qemumonitorjsontest.c b/tests/qemumonitorjsontest.c index 46d4e1e..1be0bee 100644 --- 
a/tests/qemumonitorjsontest.c +++ b/tests/qemumonitorjsontest.c @@ -1629,23 +1629,23 @@ testQemuMonitorJSONqemuMonitorJSONGetMigrationCacheSize(const void *data) } static int -testQemuMonitorJSONqemuMonitorJSONGetMigrationStatus(const void *data) +testQemuMonitorJSONqemuMonitorJSONGetMigrationStats(const void *data) { virDomainXMLOptionPtr xmlopt = (virDomainXMLOptionPtr)data; qemuMonitorTestPtr test = qemuMonitorTestNewSimple(true, xmlopt); int ret = -1; - qemuMonitorMigrationStatus status, expectedStatus; + qemuMonitorMigrationStats stats, expectedStats; if (!test) return -1; - memset(&expectedStatus, 0, sizeof(expectedStatus)); + memset(&expectedStats, 0, sizeof(expectedStats)); - expectedStatus.status = QEMU_MONITOR_MIGRATION_STATUS_ACTIVE; - expectedStatus.total_time = 47; - expectedStatus.ram_total = 1611038720; - expectedStatus.ram_remaining = 1605013504; - expectedStatus.ram_transferred = 3625548; + expectedStats.status = QEMU_MONITOR_MIGRATION_STATUS_ACTIVE; + expectedStats.total_time = 47; + expectedStats.ram_total = 1611038720; + expectedStats.ram_remaining = 1605013504; + expectedStats.ram_transferred = 3625548; if (qemuMonitorTestAddItem(test, "query-migrate", "{" @@ -1662,10 +1662,10 @@ testQemuMonitorJSONqemuMonitorJSONGetMigrationStatus(const void *data) "}") < 0) goto cleanup; - if (qemuMonitorJSONGetMigrationStatus(qemuMonitorTestGetMonitor(test), &status) < 0) + if (qemuMonitorJSONGetMigrationStats(qemuMonitorTestGetMonitor(test), &stats) < 0) goto cleanup; - if (memcmp(&status, &expectedStatus, sizeof(status)) != 0) { + if (memcmp(&stats, &expectedStats, sizeof(stats)) != 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", "Invalid migration status"); goto cleanup; @@ -2333,7 +2333,7 @@ mymain(void) DO_TEST(qemuMonitorJSONGetBlockInfo); DO_TEST(qemuMonitorJSONGetBlockStatsInfo); DO_TEST(qemuMonitorJSONGetMigrationCacheSize); - DO_TEST(qemuMonitorJSONGetMigrationStatus); + DO_TEST(qemuMonitorJSONGetMigrationStats); 
DO_TEST(qemuMonitorJSONGetChardevInfo); DO_TEST(qemuMonitorJSONSetBlockIoThrottle); DO_TEST(qemuMonitorJSONGetTargetArch); -- 2.7.0

On Fri, Jan 08, 2016 at 10:49:35 +0100, Jiri Denemark wrote:
The structure actually contains migration statistics rather than just the status as the name suggests. Renaming it to qemuMonitorMigrationStats removes the confusion.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_domain.c | 84 ++++++++++++++++++------------------ src/qemu/qemu_domain.h | 2 +- src/qemu/qemu_driver.c | 2 +- src/qemu/qemu_migration.c | 100 +++++++++++++++++++++---------------------- src/qemu/qemu_monitor.c | 8 ++-- src/qemu/qemu_monitor.h | 10 ++--- src/qemu/qemu_monitor_json.c | 74 ++++++++++++++++---------------- src/qemu/qemu_monitor_json.h | 4 +- src/qemu/qemu_monitor_text.c | 40 ++++++++--------- src/qemu/qemu_monitor_text.h | 4 +- src/qemu/qemu_process.c | 2 +- tests/qemumonitorjsontest.c | 22 +++++----- 12 files changed, 176 insertions(+), 176 deletions(-)
ACK

The enum will be called qemuMonitorMigrationStatus. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 3 ++- src/qemu/qemu_monitor.h | 6 +++--- src/qemu/qemu_monitor_json.c | 24 ++++++++++++++++++------ 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 855e0e0..524102d 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -2518,7 +2518,7 @@ qemuMigrationWaitForSpice(virDomainObjPtr vm) static void qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) { - switch (jobInfo->stats.status) { + switch ((qemuMonitorMigrationStatus) jobInfo->stats.status) { case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: jobInfo->type = VIR_DOMAIN_JOB_COMPLETED; break; @@ -2538,6 +2538,7 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) case QEMU_MONITOR_MIGRATION_STATUS_SETUP: case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE: case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING: + case QEMU_MONITOR_MIGRATION_STATUS_LAST: break; } } diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 27ff169..4f1c8d3 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -455,7 +455,7 @@ int qemuMonitorGetMigrationCacheSize(qemuMonitorPtr mon, int qemuMonitorSetMigrationCacheSize(qemuMonitorPtr mon, unsigned long long cacheSize); -enum { +typedef enum { QEMU_MONITOR_MIGRATION_STATUS_INACTIVE, QEMU_MONITOR_MIGRATION_STATUS_SETUP, QEMU_MONITOR_MIGRATION_STATUS_ACTIVE, @@ -465,14 +465,14 @@ enum { QEMU_MONITOR_MIGRATION_STATUS_CANCELLED, QEMU_MONITOR_MIGRATION_STATUS_LAST -}; +} qemuMonitorMigrationStatus; VIR_ENUM_DECL(qemuMonitorMigrationStatus) typedef struct _qemuMonitorMigrationStats qemuMonitorMigrationStats; typedef qemuMonitorMigrationStats *qemuMonitorMigrationStatsPtr; struct _qemuMonitorMigrationStats { - int status; + int status; /* qemuMonitorMigrationStatus */ unsigned long long total_time; /* total or expected depending on status */ bool 
downtime_set; diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 2d3b358..50d05b4 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2425,6 +2425,9 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, qemuMonitorMigrationStatsPtr stats) { virJSONValuePtr ret; + virJSONValuePtr ram; + virJSONValuePtr disk; + virJSONValuePtr comp; const char *statusstr; int rc; double mbps; @@ -2464,10 +2467,18 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, &stats->setup_time) == 0) stats->setup_time_set = true; - if (stats->status == QEMU_MONITOR_MIGRATION_STATUS_ACTIVE || - stats->status == QEMU_MONITOR_MIGRATION_STATUS_CANCELLING || - stats->status == QEMU_MONITOR_MIGRATION_STATUS_COMPLETED) { - virJSONValuePtr ram = virJSONValueObjectGetObject(ret, "ram"); + switch ((qemuMonitorMigrationStatus) stats->status) { + case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: + case QEMU_MONITOR_MIGRATION_STATUS_SETUP: + case QEMU_MONITOR_MIGRATION_STATUS_ERROR: + case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: + case QEMU_MONITOR_MIGRATION_STATUS_LAST: + break; + + case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE: + case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: + case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING: + ram = virJSONValueObjectGetObject(ret, "ram"); if (!ram) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("migration was active, but no RAM info was set")); @@ -2510,7 +2521,7 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes", &stats->ram_normal_bytes)); - virJSONValuePtr disk = virJSONValueObjectGetObject(ret, "disk"); + disk = virJSONValueObjectGetObject(ret, "disk"); if (disk) { rc = virJSONValueObjectGetNumberUlong(disk, "transferred", &stats->disk_transferred); @@ -2546,7 +2557,7 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, } } - virJSONValuePtr comp = virJSONValueObjectGetObject(ret, "xbzrle-cache"); + comp = 
virJSONValueObjectGetObject(ret, "xbzrle-cache"); if (comp) { stats->xbzrle_set = true; rc = virJSONValueObjectGetNumberUlong(comp, "cache-size", @@ -2594,6 +2605,7 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, return -1; } } + break; } return 0; -- 2.7.0

On Fri, Jan 08, 2016 at 10:49:36 +0100, Jiri Denemark wrote:
The enum will be called qemuMonitorMigrationStatus.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 3 ++- src/qemu/qemu_monitor.h | 6 +++--- src/qemu/qemu_monitor_json.c | 24 ++++++++++++++++++------ 3 files changed, 23 insertions(+), 10 deletions(-)
ACK

memory_dirty_rate corresponds to dirty-pages-rate in QEMU and memory_iteration is what QEMU reports in dirty-sync-count. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- include/libvirt/libvirt-domain.h | 19 +++++++++++++++++++ src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_migration.c | 12 ++++++++++++ src/qemu/qemu_monitor.h | 2 ++ src/qemu/qemu_monitor_json.c | 4 ++++ tools/virsh-domain.c | 16 ++++++++++++++++ 6 files changed, 61 insertions(+) diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index a1ea6a5..d26faa5 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -2724,6 +2724,25 @@ int virDomainAbortJob(virDomainPtr dom); */ # define VIR_DOMAIN_JOB_MEMORY_BPS "memory_bps" +/** VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE: + * + * virDomainGetJobStats field: number of memory pages dirtied by the guest + * per second, as VIR_TYPED_PARAM_ULLONG. This statistics makes sense only + * when live migration is running. + */ +# define VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "memory_dirty_rate" + +/** + * VIR_DOMAIN_JOB_MEMORY_ITERATION: + * + * virDomainGetJobStats field: current iteration over domain's memory + * during live migration, as VIR_TYPED_PARAM_ULLONG. This is set to zero + * when memory starts to be transferred and the value is increased by one + * every time a new iteration is started to transfer memory pages dirtied + * since the last iteration. 
+ */ +# define VIR_DOMAIN_JOB_MEMORY_ITERATION "memory_iteration" + /** * VIR_DOMAIN_JOB_DISK_TOTAL: * diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index fb50c91..1771601 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -384,6 +384,14 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, } if (virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, + stats->ram_dirty_rate) < 0 || + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_MEMORY_ITERATION, + stats->ram_iteration) < 0) + goto error; + + if (virTypedParamsAddULLong(&par, &npar, &maxpar, VIR_DOMAIN_JOB_DISK_TOTAL, stats->disk_total) < 0 || virTypedParamsAddULLong(&par, &npar, &maxpar, diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 524102d..b02ebd7 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -751,6 +751,13 @@ qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf, } virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", + VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, + stats->ram_dirty_rate); + virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", + VIR_DOMAIN_JOB_MEMORY_ITERATION, + stats->ram_iteration); + + virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", VIR_DOMAIN_JOB_DISK_TOTAL, stats->disk_total); virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", @@ -1100,6 +1107,11 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES "[1])", ctxt, &stats->ram_normal_bytes); + virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "[1])", + ctxt, &stats->ram_dirty_rate); + virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_ITERATION "[1])", + ctxt, &stats->ram_iteration); + virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_TOTAL "[1])", ctxt, &stats->disk_total); virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_PROCESSED "[1])", diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 4f1c8d3..4193ad2 100644 --- 
a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -493,6 +493,8 @@ struct _qemuMonitorMigrationStats { unsigned long long ram_duplicate; unsigned long long ram_normal; unsigned long long ram_normal_bytes; + unsigned long long ram_dirty_rate; + unsigned long long ram_iteration; unsigned long long disk_transferred; unsigned long long disk_remaining; diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 50d05b4..077be3a 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2520,6 +2520,10 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply, &stats->ram_normal)); ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes", &stats->ram_normal_bytes)); + ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate", + &stats->ram_dirty_rate)); + ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count", + &stats->ram_iteration)); disk = virJSONValueObjectGetObject(ret, "disk"); if (disk) { diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index edbbc34..84202a5 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6045,6 +6045,22 @@ cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd) vshPrint(ctl, "%-17s %-.3lf %s/s\n", _("Memory bandwidth:"), val, unit); } + + if ((rc = virTypedParamsGetULLong(params, nparams, + VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE, + &value)) < 0) { + goto save_error; + } else if (rc) { + vshPrint(ctl, "%-17s %-12llu pages/s\n", _("Dirty rate:"), value); + } + + if ((rc = virTypedParamsGetULLong(params, nparams, + VIR_DOMAIN_JOB_MEMORY_ITERATION, + &value)) < 0) { + goto save_error; + } else if (rc) { + vshPrint(ctl, "%-17s %-12llu\n", _("Iteration:"), value); + } } if (info.fileTotal || info.fileRemaining || info.fileProcessed) { -- 2.7.0

On Fri, Jan 08, 2016 at 10:49:37 +0100, Jiri Denemark wrote:
memory_dirty_rate corresponds to dirty-pages-rate in QEMU and memory_iteration is what QEMU reports in dirty-sync-count.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
This looks more like a feature than just a "cleanup".
--- include/libvirt/libvirt-domain.h | 19 +++++++++++++++++++ src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_migration.c | 12 ++++++++++++ src/qemu/qemu_monitor.h | 2 ++ src/qemu/qemu_monitor_json.c | 4 ++++ tools/virsh-domain.c | 16 ++++++++++++++++ 6 files changed, 61 insertions(+)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index a1ea6a5..d26faa5 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -2724,6 +2724,25 @@ int virDomainAbortJob(virDomainPtr dom); */ # define VIR_DOMAIN_JOB_MEMORY_BPS "memory_bps"
+/** VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE: + * + * virDomainGetJobStats field: number of memory pages dirtied by the guest + * per second, as VIR_TYPED_PARAM_ULLONG. This statistics makes sense only + * when live migration is running.
Do we document anywhere how to convert page counts into memory sizes (i.e. how big the pages are, and whether hugepages count as one page or more)?
+ */ +# define VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "memory_dirty_rate" + +/** + * VIR_DOMAIN_JOB_MEMORY_ITERATION: + * + * virDomainGetJobStats field: current iteration over domain's memory + * during live migration, as VIR_TYPED_PARAM_ULLONG. This is set to zero + * when memory starts to be transferred and the value is increased by one + * every time a new iteration is started to transfer memory pages dirtied + * since the last iteration. + */ +# define VIR_DOMAIN_JOB_MEMORY_ITERATION "memory_iteration"
ACK

On Fri, Jan 08, 2016 at 15:56:02 +0100, Peter Krempa wrote:
On Fri, Jan 08, 2016 at 10:49:37 +0100, Jiri Denemark wrote:
memory_dirty_rate corresponds to dirty-pages-rate in QEMU and memory_iteration is what QEMU reports in dirty-sync-count.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Looks more like a feature rather than just "cleanup"
--- include/libvirt/libvirt-domain.h | 19 +++++++++++++++++++ src/qemu/qemu_domain.c | 8 ++++++++ src/qemu/qemu_migration.c | 12 ++++++++++++ src/qemu/qemu_monitor.h | 2 ++ src/qemu/qemu_monitor_json.c | 4 ++++ tools/virsh-domain.c | 16 ++++++++++++++++ 6 files changed, 61 insertions(+)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index a1ea6a5..d26faa5 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -2724,6 +2724,25 @@ int virDomainAbortJob(virDomainPtr dom); */ # define VIR_DOMAIN_JOB_MEMORY_BPS "memory_bps"
+/** VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE: + * + * virDomainGetJobStats field: number of memory pages dirtied by the guest + * per second, as VIR_TYPED_PARAM_ULLONG. This statistics makes sense only + * when live migration is running.
Do we document somewhere how to convert page sizes into memory sizes (or rather how big the pages are/whether hugepages count as 1 or more etc...?)
No we don't :-) And QEMU doesn't document that either. Honestly I doubt anyone would even try to convert this into memory sizes. It's enough to monitor the trend to see whether a migration is converging or not. If the value is decreasing, there is a chance migration will eventually finish, otherwise, it's obvious that the guest is dirtying more memory than QEMU can transfer to the destination host.
+ */ +# define VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "memory_dirty_rate" + +/** + * VIR_DOMAIN_JOB_MEMORY_ITERATION: + * + * virDomainGetJobStats field: current iteration over domain's memory + * during live migration, as VIR_TYPED_PARAM_ULLONG. This is set to zero + * when memory starts to be transferred and the value is increased by one + * every time a new iteration is started to transfer memory pages dirtied + * since the last iteration. + */ +# define VIR_DOMAIN_JOB_MEMORY_ITERATION "memory_iteration"
ACK
I pushed this series, thanks for the review. Jirka

To get rid of a giant if-else block which is very easy to get lost in. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- Notes: This patch might be a bit easier to review using -b. src/qemu/qemu_migration.c | 255 +++++++++++++++++++++++----------------------- 1 file changed, 127 insertions(+), 128 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index b02ebd7..49ed777 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5792,6 +5792,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); unsigned short port; + unsigned long long timeReceived = 0; + virObjectEventPtr event; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -5806,6 +5808,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver, goto cleanup; } + ignore_value(virTimeMillisNow(&timeReceived)); + qemuMigrationJobStartPhase(driver, vm, v3proto ? QEMU_MIGRATION_PHASE_FINISH3 : QEMU_MIGRATION_PHASE_FINISH2); @@ -5823,132 +5827,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver, cookieinlen, cookie_flags))) goto endjob; - /* Did the migration go as planned? If yes, return the domain - * object, but if no, clean up the empty qemu process. 
- */ if (flags & VIR_MIGRATE_OFFLINE) { - if (retcode != 0 || - qemuMigrationPersist(driver, vm, mig, false) < 0) - goto endjob; + if (retcode == 0 && + qemuMigrationPersist(driver, vm, mig, false) == 0) + dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); + goto endjob; + } - dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); - } else if (retcode == 0) { - unsigned long long timeReceived = 0; - - ignore_value(virTimeMillisNow(&timeReceived)); - - if (!virDomainObjIsActive(vm)) { - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("guest unexpectedly quit")); - qemuMigrationErrorReport(driver, vm->def->name); - goto endjob; - } - - if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) - goto endjob; - - if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0) - VIR_WARN("unable to provide network data for relocation"); - - if (qemuMigrationStopNBDServer(driver, vm, mig) < 0) - goto endjob; - - if (flags & VIR_MIGRATE_PERSIST_DEST) { - if (qemuMigrationPersist(driver, vm, mig, !v3proto) < 0) { - /* Hmpf. Migration was successful, but making it persistent - * was not. If we report successful, then when this domain - * shuts down, management tools are in for a surprise. On the - * other hand, if we report failure, then the management tools - * might try to restart the domain on the source side, even - * though the domain is actually running on the destination. - * Pretend success and hope that this is a rare situation and - * management tools are smart. - * - * However, in v3 protocol, the source VM is still available - * to restart during confirm() step, so we kill it off now. - */ - if (v3proto) - goto endjob; - } - } - - /* We need to wait for QEMU to process all data sent by the source - * before starting guest CPUs. - */ - if (qemuMigrationWaitForDestCompletion(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) { - /* There's not much we can do for v2 protocol since the - * original domain on the source host is already gone. 
- */ - if (v3proto) - goto endjob; - } - - if (!(flags & VIR_MIGRATE_PAUSED)) { - /* run 'cont' on the destination, which allows migration on qemu - * >= 0.10.6 to work properly. This isn't strictly necessary on - * older qemu's, but it also doesn't hurt anything there - */ - if (qemuProcessStartCPUs(driver, vm, dconn, - VIR_DOMAIN_RUNNING_MIGRATED, - QEMU_ASYNC_JOB_MIGRATION_IN) < 0) { - if (virGetLastError() == NULL) - virReportError(VIR_ERR_INTERNAL_ERROR, - "%s", _("resume operation failed")); - /* Need to save the current error, in case shutting - * down the process overwrites it - */ - orig_err = virSaveLastError(); - - /* - * In v3 protocol, the source VM is still available to - * restart during confirm() step, so we kill it off - * now. - * In v2 protocol, the source is dead, so we leave - * target in paused state, in case admin can fix - * things up - */ - if (v3proto) - goto endjob; - } - } - - if (mig->jobInfo) { - qemuDomainJobInfoPtr jobInfo = mig->jobInfo; - priv->job.completed = jobInfo; - mig->jobInfo = NULL; - - if (jobInfo->sent && timeReceived) { - jobInfo->timeDelta = timeReceived - jobInfo->sent; - jobInfo->received = timeReceived; - jobInfo->timeDeltaSet = true; - } - qemuDomainJobInfoUpdateTime(priv->job.completed); - qemuDomainJobInfoUpdateDowntime(priv->job.completed); - } - - dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); - - qemuDomainEventQueue(driver, - virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_RESUMED, - VIR_DOMAIN_EVENT_RESUMED_MIGRATED)); - if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { - virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, - VIR_DOMAIN_PAUSED_USER); - qemuDomainEventQueue(driver, - virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_SUSPENDED, - VIR_DOMAIN_EVENT_SUSPENDED_PAUSED)); - } - - if (virDomainObjIsActive(vm) && - virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) - VIR_WARN("Failed to save status on vm %s", vm->def->name); - - /* Guest is successfully running, so 
cancel previous auto destroy */ - qemuProcessAutoDestroyRemove(driver, vm); - } else { + if (retcode != 0) { qemuDomainJobInfo info; /* Check for a possible error on the monitor in case Finish was called @@ -5957,8 +5843,121 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, &info); + goto endjob; } + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest unexpectedly quit")); + qemuMigrationErrorReport(driver, vm->def->name); + goto endjob; + } + + if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) + goto endjob; + + if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0) + VIR_WARN("unable to provide network data for relocation"); + + if (qemuMigrationStopNBDServer(driver, vm, mig) < 0) + goto endjob; + + if (flags & VIR_MIGRATE_PERSIST_DEST) { + if (qemuMigrationPersist(driver, vm, mig, !v3proto) < 0) { + /* Hmpf. Migration was successful, but making it persistent + * was not. If we report successful, then when this domain + * shuts down, management tools are in for a surprise. On the + * other hand, if we report failure, then the management tools + * might try to restart the domain on the source side, even + * though the domain is actually running on the destination. + * Pretend success and hope that this is a rare situation and + * management tools are smart. + * + * However, in v3 protocol, the source VM is still available + * to restart during confirm() step, so we kill it off now. + */ + if (v3proto) + goto endjob; + } + } + + /* We need to wait for QEMU to process all data sent by the source + * before starting guest CPUs. + */ + if (qemuMigrationWaitForDestCompletion(driver, vm, + QEMU_ASYNC_JOB_MIGRATION_IN) < 0) { + /* There's not much we can do for v2 protocol since the + * original domain on the source host is already gone. 
+ */ + if (v3proto) + goto endjob; + } + + if (!(flags & VIR_MIGRATE_PAUSED)) { + /* run 'cont' on the destination, which allows migration on qemu + * >= 0.10.6 to work properly. This isn't strictly necessary on + * older qemu's, but it also doesn't hurt anything there + */ + if (qemuProcessStartCPUs(driver, vm, dconn, + VIR_DOMAIN_RUNNING_MIGRATED, + QEMU_ASYNC_JOB_MIGRATION_IN) < 0) { + if (virGetLastError() == NULL) + virReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("resume operation failed")); + /* Need to save the current error, in case shutting + * down the process overwrites it + */ + orig_err = virSaveLastError(); + + /* + * In v3 protocol, the source VM is still available to + * restart during confirm() step, so we kill it off + * now. + * In v2 protocol, the source is dead, so we leave + * target in paused state, in case admin can fix + * things up. + */ + if (v3proto) + goto endjob; + } + } + + if (mig->jobInfo) { + qemuDomainJobInfoPtr jobInfo = mig->jobInfo; + priv->job.completed = jobInfo; + mig->jobInfo = NULL; + + if (jobInfo->sent && timeReceived) { + jobInfo->timeDelta = timeReceived - jobInfo->sent; + jobInfo->received = timeReceived; + jobInfo->timeDeltaSet = true; + } + qemuDomainJobInfoUpdateTime(priv->job.completed); + qemuDomainJobInfoUpdateDowntime(priv->job.completed); + } + + dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); + + event = virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_RESUMED, + VIR_DOMAIN_EVENT_RESUMED_MIGRATED); + qemuDomainEventQueue(driver, event); + + if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { + virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER); + event = virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_SUSPENDED, + VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); + qemuDomainEventQueue(driver, event); + } + + if (virDomainObjIsActive(vm) && + virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) + VIR_WARN("Failed to save status on vm %s", vm->def->name); + + /* 
Guest is successfully running, so cancel previous auto destroy */ + qemuProcessAutoDestroyRemove(driver, vm); + endjob: if (!dom && !(flags & VIR_MIGRATE_OFFLINE) && @@ -5966,10 +5965,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, VIR_QEMU_PROCESS_STOP_MIGRATED); virDomainAuditStop(vm, "failed"); - qemuDomainEventQueue(driver, - virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_FAILED)); + event = virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_STOPPED, + VIR_DOMAIN_EVENT_STOPPED_FAILED); + qemuDomainEventQueue(driver, event); } if (dom && -- 2.7.0

On Fri, Jan 08, 2016 at 10:49:38 +0100, Jiri Denemark wrote:
To get rid of a giant if-else block which is very easy to get lost in.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> ---
Notes: This patch might be a bit easier to review using -b.
Slightly true.
src/qemu/qemu_migration.c | 255 +++++++++++++++++++++++----------------------- 1 file changed, 127 insertions(+), 128 deletions(-)
ACK
participants (2)
-
Jiri Denemark
-
Peter Krempa