From: Huaqiang <huaqiang.wang@intel.com>
Introduce an option '--memory' for showing memory-related
information. The memory bandwidth information is listed as:
Domain: 'libvirt-vm'
memory.bandwidth.monitor.count=4
memory.bandwidth.monitor.0.name=vcpus_0-4
memory.bandwidth.monitor.0.vcpus=0-4
memory.bandwidth.monitor.0.node.count=2
memory.bandwidth.monitor.0.node.0.id=0
memory.bandwidth.monitor.0.node.0.bytes.total=10208067584
memory.bandwidth.monitor.0.node.0.bytes.local=4807114752
memory.bandwidth.monitor.0.node.1.id=1
memory.bandwidth.monitor.0.node.1.bytes.total=8693735424
memory.bandwidth.monitor.0.node.1.bytes.local=5850161152
memory.bandwidth.monitor.1.name=vcpus_7
memory.bandwidth.monitor.1.vcpus=7
memory.bandwidth.monitor.1.node.count=2
memory.bandwidth.monitor.1.node.0.id=0
memory.bandwidth.monitor.1.node.0.bytes.total=853811200
memory.bandwidth.monitor.1.node.0.bytes.local=290701312
memory.bandwidth.monitor.1.node.1.id=1
memory.bandwidth.monitor.1.node.1.bytes.total=406044672
memory.bandwidth.monitor.1.node.1.bytes.local=229425152
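
The sample above would come from a query along the lines of (the
domain name 'libvirt-vm' is just the one used for testing):

    # virsh domstats --memory libvirt-vm

The same data is available through the public API via the new
VIR_DOMAIN_STATS_MEMORY flag. A minimal client sketch, with the
connection URI and error handling simplified for illustration:

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
        virDomainStatsRecordPtr *records = NULL;
        int nrecords;
        int i, j;

        if (!conn)
            return 1;

        /* Fetch only the memory bandwidth statistics group */
        nrecords = virConnectGetAllDomainStats(conn, VIR_DOMAIN_STATS_MEMORY,
                                               &records, 0);

        for (i = 0; i < nrecords; i++) {
            printf("Domain: '%s'\n", virDomainGetName(records[i]->dom));

            for (j = 0; j < records[i]->nparams; j++) {
                virTypedParameterPtr p = &records[i]->params[j];

                /* count/id keys are uint, byte counters are ullong,
                 * name/vcpus are strings */
                if (p->type == VIR_TYPED_PARAM_ULLONG)
                    printf("%s=%llu\n", p->field, p->value.ul);
                else if (p->type == VIR_TYPED_PARAM_UINT)
                    printf("%s=%u\n", p->field, p->value.ui);
                else if (p->type == VIR_TYPED_PARAM_STRING)
                    printf("%s=%s\n", p->field, p->value.s);
            }
        }

        if (records)
            virDomainStatsRecordListFree(records);
        virConnectClose(conn);
        return 0;
    }
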
Signed-off-by: Huaqiang <huaqiang.wang@intel.com>
---
include/libvirt/libvirt-domain.h | 1 +
src/libvirt-domain.c | 21 +++++++
src/qemu/qemu_driver.c | 99 ++++++++++++++++++++++++++++++++
tools/virsh-domain-monitor.c | 7 +++
tools/virsh.pod | 23 +++++++-
5 files changed, 149 insertions(+), 2 deletions(-)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index 22277b0a84..2b621ff162 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -2146,6 +2146,7 @@ typedef enum {
VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
VIR_DOMAIN_STATS_PERF = (1 << 6), /* return domain perf event info */
VIR_DOMAIN_STATS_IOTHREAD = (1 << 7), /* return iothread poll info */
+ VIR_DOMAIN_STATS_MEMORY = (1 << 8), /* return domain memory info */
} virDomainStatsTypes;
typedef enum {
diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
index dcab179e6e..c8c543ccde 100644
--- a/src/libvirt-domain.c
+++ b/src/libvirt-domain.c
@@ -11641,6 +11641,27 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
* hypervisor to choose how to shrink the
* polling time.
*
+ * VIR_DOMAIN_STATS_MEMORY:
+ * Return memory bandwidth statistics and usage information. The typed
+ * parameter keys are in this format:
+ *
+ * "memory.bandwidth.monitor.count" - the number of memory bandwidth
+ * monitors for this domain
+ * "memory.bandwidth.monitor.<num>.name" - the name of monitor
<num>
+ * "memory.bandwidth.monitor.<num>.vcpus" - the vcpu list of monitor
<num>
+ * "memory.bandwidth.monitor.<num>.node.count" - the number of
memory
+ * controller in monitor <num>
+ * "memory.bandwidth.monitor.<num>.node.<index>.id" - host
allocated memory
+ * controller id for controller
+ * <index> of monitor <num>
+ * "memory.bandwidth.monitor.<num>.node.<index>.bytes.local" -
the
+ * accumulative bytes consumed by @vcpus that passing
+ * through the memory controller in the same processor
+ * that the scheduled host CPU belongs to.
+ * "memory.bandwidth.monitor.<num>.node.<index>.bytes.total" -
the total
+ * bytes consumed by @vcpus that passing through all
+ * memory controllers, either local or remote controller.
+ *
* Note that entire stats groups or individual stat fields may be missing from
* the output in case they are not supported by the given hypervisor, are not
* applicable for the current state of the guest domain, or their retrieval
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e396358871..37a986a1bd 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -20496,6 +20496,9 @@ qemuDomainGetResctrlMonData(virQEMUDriverPtr driver,
features = caps->host.cache.monitor->features;
break;
case VIR_RESCTRL_MONITOR_TYPE_MEMBW:
+ if (caps->host.memBW.monitor)
+ features = caps->host.memBW.monitor->features;
+ break;
case VIR_RESCTRL_MONITOR_TYPE_UNSUPPORT:
case VIR_RESCTRL_MONITOR_TYPE_LAST:
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
@@ -20548,6 +20551,90 @@ qemuDomainGetResctrlMonData(virQEMUDriverPtr driver,
}
+static int
+qemuDomainGetStatsMemoryBandwidth(virQEMUDriverPtr driver,
+ virDomainObjPtr dom,
+ virTypedParamListPtr params)
+{
+ virQEMUResctrlMonDataPtr *resdata = NULL;
+ char **features = NULL;
+ size_t nresdata = 0;
+ size_t i = 0;
+ size_t j = 0;
+ size_t k = 0;
+ int ret = -1;
+
+ if (!virDomainObjIsActive(dom))
+ return 0;
+
+ if (qemuDomainGetResctrlMonData(driver, dom, &resdata, &nresdata,
+ VIR_RESCTRL_MONITOR_TYPE_MEMBW) < 0)
+ goto cleanup;
+
+ if (nresdata == 0)
+ return 0;
+
+ if (virTypedParamListAddUInt(params, nresdata,
+ "memory.bandwidth.monitor.count") < 0)
+ goto cleanup;
+
+ for (i = 0; i < nresdata; i++) {
+ if (virTypedParamListAddString(params, resdata[i]->name,
+ "memory.bandwidth.monitor.%zu.name",
+ i) < 0)
+ goto cleanup;
+
+ if (virTypedParamListAddString(params, resdata[i]->vcpus,
+ "memory.bandwidth.monitor.%zu.vcpus",
+ i) < 0)
+ goto cleanup;
+
+ if (virTypedParamListAddUInt(params, resdata[i]->nstats,
+ "memory.bandwidth.monitor.%zu.node.count",
+ i) < 0)
+ goto cleanup;
+
+
+ for (j = 0; j < resdata[i]->nstats; j++) {
+ if (virTypedParamListAddUInt(params, resdata[i]->stats[j]->id,
+ "memory.bandwidth.monitor.%zu."
+ "node.%zu.id",
+ i, j) < 0)
+ goto cleanup;
+
+
+ features = resdata[i]->stats[j]->features;
+ for (k = 0; features[k]; k++) {
+ if (STREQ(features[k], "mbm_local_bytes")) {
+ if (virTypedParamListAddULLong(params,
+ resdata[i]->stats[j]->vals[k],
+ "memory.bandwidth.monitor."
+ "%zu.node.%zu.bytes.local",
+ i, j) < 0)
+ goto cleanup;
+ }
+
+ if (STREQ(features[k], "mbm_total_bytes")) {
+ if (virTypedParamListAddULLong(params,
+ resdata[i]->stats[j]->vals[k],
+ "memory.bandwidth.monitor."
+ "%zu.node.%zu.bytes.total",
+ i, j) < 0)
+ goto cleanup;
+ }
+ }
+ }
+ }
+
+ ret = 0;
+ cleanup:
+ for (i = 0; i < nresdata; i++)
+ qemuDomainFreeResctrlMonData(resdata[i]);
+ VIR_FREE(resdata);
+ return ret;
+}
+
+
static int
qemuDomainGetStatsCpuCache(virQEMUDriverPtr driver,
virDomainObjPtr dom,
@@ -20645,6 +20732,17 @@ qemuDomainGetStatsCpu(virQEMUDriverPtr driver,
}
+static int
+qemuDomainGetStatsMemory(virQEMUDriverPtr driver,
+ virDomainObjPtr dom,
+ virTypedParamListPtr params,
+ unsigned int privflags G_GNUC_UNUSED)
+
+{
+ return qemuDomainGetStatsMemoryBandwidth(driver, dom, params);
+}
+
+
static int
qemuDomainGetStatsBalloon(virQEMUDriverPtr driver,
virDomainObjPtr dom,
@@ -21314,6 +21412,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
{ qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
{ qemuDomainGetStatsPerf, VIR_DOMAIN_STATS_PERF, false },
{ qemuDomainGetStatsIOThread, VIR_DOMAIN_STATS_IOTHREAD, true },
+ { qemuDomainGetStatsMemory, VIR_DOMAIN_STATS_MEMORY, false },
{ NULL, 0, false }
};
diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c
index 034c913d5e..8abd0f2d0b 100644
--- a/tools/virsh-domain-monitor.c
+++ b/tools/virsh-domain-monitor.c
@@ -2111,6 +2111,10 @@ static const vshCmdOptDef opts_domstats[] = {
.type = VSH_OT_BOOL,
.help = N_("report domain IOThread information"),
},
+ {.name = "memory",
+ .type = VSH_OT_BOOL,
+ .help = N_("report domain memory usage"),
+ },
{.name = "list-active",
.type = VSH_OT_BOOL,
.help = N_("list only active domains"),
@@ -2227,6 +2231,9 @@ cmdDomstats(vshControl *ctl, const vshCmd *cmd)
if (vshCommandOptBool(cmd, "iothread"))
stats |= VIR_DOMAIN_STATS_IOTHREAD;
+ if (vshCommandOptBool(cmd, "memory"))
+ stats |= VIR_DOMAIN_STATS_MEMORY;
+
if (vshCommandOptBool(cmd, "list-active"))
flags |= VIR_CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE;
diff --git a/tools/virsh.pod b/tools/virsh.pod
index cf2798e71a..30effffcba 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -1483,7 +1483,7 @@ reason for the state.
=item B<domstats> [I<--raw>] [I<--enforce>] [I<--backing>] [I<--nowait>]
[I<--state>] [I<--cpu-total>] [I<--balloon>] [I<--vcpu>] [I<--interface>]
-[I<--block>] [I<--perf>] [I<--iothread>]
+[I<--block>] [I<--perf>] [I<--iothread>] [I<--memory>]
[[I<--list-active>] [I<--list-inactive>]
[I<--list-persistent>] [I<--list-transient>] [I<--list-running>]
[I<--list-paused>] [I<--list-shutoff>] [I<--list-other>]] | [I<domain> ...]
@@ -1502,7 +1502,7 @@ behavior use the I<--raw> flag.
The individual statistics groups are selectable via specific flags. By
default all supported statistics groups are returned. Supported
statistics groups flags are: I<--state>, I<--cpu-total>, I<--balloon>,
-I<--vcpu>, I<--interface>, I<--block>, I<--perf>, I<--iothread>.
+I<--vcpu>, I<--interface>, I<--block>, I<--perf>, I<--iothread>, I<--memory>.
Note that - depending on the hypervisor type and version or the domain state
- not all of the following statistics may be returned.
@@ -1670,6 +1670,25 @@ not available for statistical purposes.
0 (zero) indicates shrink is managed by
the hypervisor.
+I<--memory> returns:
+
+ "memory.bandwidth.monitor.count" - the number of memory bandwidth
+ monitors for this domain
+ "memory.bandwidth.monitor.<num>.name" - the name of monitor
<num>
+ "memory.bandwidth.monitor.<num>.vcpus" - the vcpu list of monitor
<num>
+ "memory.bandwidth.monitor.<num>.node.count" - the number of memory
+ controller in monitor <num>
+ "memory.bandwidth.monitor.<num>.node.<index>.id" - host allocated
memory
+ controller id for controller
+ <index> of monitor <num>
+ "memory.bandwidth.monitor.<num>.node.<index>.bytes.local" - the
accumulative
+ bytes consumed by @vcpus that passing through
+ the memory controller in the same processor
+ that the scheduled host CPU belongs to.
+ "memory.bandwidth.monitor.<num>.node.<index>.bytes.total" - the
total
+ bytes consumed by @vcpus that passing through all
+ memory controllers, either local or remote controller.
+
Selecting a specific statistics groups doesn't guarantee that the
daemon supports the selected group of stats. Flag I<--enforce>
forces the command to fail if the daemon doesn't support the
--
2.23.0