memory_dirty_rate corresponds to dirty-pages-rate in QEMU and
memory_iteration is what QEMU reports in dirty-sync-count.
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
---
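Note for reviewers (not part of the commit): the two values are taken from the
"ram" section of QEMU's query-migrate reply (dirty-pages-rate and
dirty-sync-count) and exposed as two new typed parameters of
virDomainGetJobStats. Below is a minimal standalone sketch of how a management
application could read them; the qemu:///system URI and the domain name
"example" are made up for illustration and error handling is trimmed.

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = conn ? virDomainLookupByName(conn, "example") : NULL;
        virTypedParameterPtr params = NULL;
        int nparams = 0;
        int type;
        unsigned long long dirty_rate = 0;
        unsigned long long iteration = 0;

        if (dom && virDomainGetJobStats(dom, &type, &params, &nparams, 0) == 0) {
            /* The getters return 1 when the field is present, 0 when the
             * daemon did not report it, and -1 on error. */
            if (virTypedParamsGetULLong(params, nparams,
                                        VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
                                        &dirty_rate) == 1)
                printf("dirty rate: %llu pages/s\n", dirty_rate);
            if (virTypedParamsGetULLong(params, nparams,
                                        VIR_DOMAIN_JOB_MEMORY_ITERATION,
                                        &iteration) == 1)
                printf("iteration:  %llu\n", iteration);
            virTypedParamsFree(params, nparams);
        }

        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return 0;
    }

During an active live migration, virsh domjobinfo prints the same values on the
new "Dirty rate:" and "Iteration:" lines added in this patch; outside of live
migration the dirty rate is not meaningful (see the documentation added to
libvirt-domain.h).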
 include/libvirt/libvirt-domain.h | 19 +++++++++++++++++++
 src/qemu/qemu_domain.c           |  8 ++++++++
 src/qemu/qemu_migration.c        | 12 ++++++++++++
 src/qemu/qemu_monitor.h          |  2 ++
 src/qemu/qemu_monitor_json.c     |  4 ++++
 tools/virsh-domain.c             | 16 ++++++++++++++++
 6 files changed, 61 insertions(+)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index a1ea6a5..d26faa5 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -2724,6 +2724,25 @@ int virDomainAbortJob(virDomainPtr dom);
*/
# define VIR_DOMAIN_JOB_MEMORY_BPS "memory_bps"
+/** VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE:
+ *
+ * virDomainGetJobStats field: number of memory pages dirtied by the guest
+ * per second, as VIR_TYPED_PARAM_ULLONG. This statistic makes sense only
+ * when live migration is running.
+ */
+# define VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE "memory_dirty_rate"
+
+/**
+ * VIR_DOMAIN_JOB_MEMORY_ITERATION:
+ *
+ * virDomainGetJobStats field: current iteration over domain's memory
+ * during live migration, as VIR_TYPED_PARAM_ULLONG. This is set to zero
+ * when memory starts to be transferred and the value is increased by one
+ * every time a new iteration is started to transfer memory pages dirtied
+ * since the last iteration.
+ */
+# define VIR_DOMAIN_JOB_MEMORY_ITERATION "memory_iteration"
+
/**
* VIR_DOMAIN_JOB_DISK_TOTAL:
*
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index fb50c91..1771601 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -384,6 +384,14 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
}
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+ stats->ram_dirty_rate) < 0 ||
+ virTypedParamsAddULLong(&par, &npar, &maxpar,
+ VIR_DOMAIN_JOB_MEMORY_ITERATION,
+ stats->ram_iteration) < 0)
+ goto error;
+
+ if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DISK_TOTAL,
stats->disk_total) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 524102d..b02ebd7 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -751,6 +751,13 @@ qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf,
}
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
+ VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+ stats->ram_dirty_rate);
+ virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
+ VIR_DOMAIN_JOB_MEMORY_ITERATION,
+ stats->ram_iteration);
+
+ virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
VIR_DOMAIN_JOB_DISK_TOTAL,
stats->disk_total);
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
@@ -1100,6 +1107,11 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES
"[1])",
ctxt, &stats->ram_normal_bytes);
+ virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE
"[1])",
+ ctxt, &stats->ram_dirty_rate);
+ virXPathULongLong("string(./" VIR_DOMAIN_JOB_MEMORY_ITERATION
"[1])",
+ ctxt, &stats->ram_iteration);
+
virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_TOTAL "[1])",
ctxt, &stats->disk_total);
virXPathULongLong("string(./" VIR_DOMAIN_JOB_DISK_PROCESSED
"[1])",
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 4f1c8d3..4193ad2 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -493,6 +493,8 @@ struct _qemuMonitorMigrationStats {
unsigned long long ram_duplicate;
unsigned long long ram_normal;
unsigned long long ram_normal_bytes;
+ unsigned long long ram_dirty_rate;
+ unsigned long long ram_iteration;
unsigned long long disk_transferred;
unsigned long long disk_remaining;
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 50d05b4..077be3a 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2520,6 +2520,10 @@ qemuMonitorJSONGetMigrationStatsReply(virJSONValuePtr reply,
&stats->ram_normal));
ignore_value(virJSONValueObjectGetNumberUlong(ram, "normal-bytes",
&stats->ram_normal_bytes));
+ ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-pages-rate",
+ &stats->ram_dirty_rate));
+ ignore_value(virJSONValueObjectGetNumberUlong(ram, "dirty-sync-count",
+ &stats->ram_iteration));
disk = virJSONValueObjectGetObject(ret, "disk");
if (disk) {
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index edbbc34..84202a5 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6045,6 +6045,22 @@ cmdDomjobinfo(vshControl *ctl, const vshCmd *cmd)
vshPrint(ctl, "%-17s %-.3lf %s/s\n",
_("Memory bandwidth:"), val, unit);
}
+
+ if ((rc = virTypedParamsGetULLong(params, nparams,
+ VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
+ &value)) < 0) {
+ goto save_error;
+ } else if (rc) {
+ vshPrint(ctl, "%-17s %-12llu pages/s\n", _("Dirty
rate:"), value);
+ }
+
+ if ((rc = virTypedParamsGetULLong(params, nparams,
+ VIR_DOMAIN_JOB_MEMORY_ITERATION,
+ &value)) < 0) {
+ goto save_error;
+ } else if (rc) {
+ vshPrint(ctl, "%-17s %-12llu\n", _("Iteration:"),
value);
+ }
}
if (info.fileTotal || info.fileRemaining || info.fileProcessed) {
--
2.7.0