Wire up backing chain recursion. Note that for now, we just use
the same allocation numbers for read-only backing files as what
offline domains would report. It is not the correct allocation
number for qcow2 over block devices during block-commit, and it
misses out on the fact that qemu also reports read statistics on
backing files that are worth knowing about (seriously - for a
thin-provisioned setup, it would be nice to easily get at a count
of how many reads were serviced from the backing file in relation
to reads serviced by the active layer). But it is at least
sufficient to prove that the algorithm is working, and to let
other people start coding to the interface while waiting for
later patches that get the correct information.
For a running domain, where one of the two images has a backing
file, I see the traditional output:
$ virsh domstats --block testvm2
Domain: 'testvm2'
block.count=2
block.0.name=vda
block.0.path=/tmp/wrapper.qcow2
block.0.rd.reqs=1
block.0.rd.bytes=512
block.0.rd.times=28858
block.0.wr.reqs=0
block.0.wr.bytes=0
block.0.wr.times=0
block.0.fl.reqs=0
block.0.fl.times=0
block.0.allocation=0
block.0.capacity=1310720000
block.0.physical=200704
block.1.name=vdb
block.1.path=/dev/sda7
block.1.rd.reqs=0
block.1.rd.bytes=0
block.1.rd.times=0
block.1.wr.reqs=0
block.1.wr.bytes=0
block.1.wr.times=0
block.1.fl.reqs=0
block.1.fl.times=0
block.1.allocation=0
block.1.capacity=1310720000
vs. the new output:
$ virsh domstats --block --backing testvm2
Domain: 'testvm2'
block.count=3
block.0.name=vda
block.0.path=/tmp/wrapper.qcow2
block.0.rd.reqs=1
block.0.rd.bytes=512
block.0.rd.times=28858
block.0.wr.reqs=0
block.0.wr.bytes=0
block.0.wr.times=0
block.0.fl.reqs=0
block.0.fl.times=0
block.0.allocation=0
block.0.capacity=1310720000
block.0.physical=200704
block.1.name=vda
block.1.path=/dev/sda6
block.1.backingIndex=1
block.1.allocation=1073741824
block.1.capacity=1310720000
block.1.physical=1073741824
block.2.name=vdb
block.2.path=/dev/sda7
block.2.rd.reqs=0
block.2.rd.bytes=0
block.2.rd.times=0
block.2.wr.reqs=0
block.2.wr.bytes=0
block.2.wr.times=0
block.2.fl.reqs=0
block.2.fl.times=0
block.2.allocation=0
block.2.capacity=1310720000
* src/qemu/qemu_driver.c (QEMU_DOMAIN_STATS_BACKING): New internal
enum bit.
(qemuConnectGetAllDomainStats): Recognize new user flag, and pass
details to...
(qemuDomainGetStatsBlock): ...here, where we can do longer recursion.
(qemuDomainGetStatsOneBlock): Output new field.
Signed-off-by: Eric Blake <eblake@redhat.com>
---
src/qemu/qemu_driver.c | 55 +++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 46 insertions(+), 9 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index feaa4a2..b57beeb 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -18256,8 +18256,10 @@ qemuDomainGetStatsState(virQEMUDriverPtr driver
ATTRIBUTE_UNUSED,
typedef enum {
- QEMU_DOMAIN_STATS_HAVE_JOB = (1 << 0), /* job is entered, monitor can be
- accessed */
+ QEMU_DOMAIN_STATS_HAVE_JOB = 1 << 0, /* job is entered, monitor can be
+ accessed */
+ QEMU_DOMAIN_STATS_BACKING = 1 << 1, /* include backing chain in
+ block stats */
} qemuDomainStatsFlags;
@@ -18502,6 +18504,19 @@ qemuDomainGetStatsInterface(virQEMUDriverPtr driver
ATTRIBUTE_UNUSED,
#undef QEMU_ADD_NET_PARAM
+#define QEMU_ADD_BLOCK_PARAM_UI(record, maxparams, num, name, value) \
+ do { \
+ char param_name[VIR_TYPED_PARAM_FIELD_LENGTH]; \
+ snprintf(param_name, VIR_TYPED_PARAM_FIELD_LENGTH, \
+ "block.%zu.%s", num, name); \
+ if (virTypedParamsAddUInt(&(record)->params, \
+ &(record)->nparams, \
+ maxparams, \
+ param_name, \
+ value) < 0) \
+ goto cleanup; \
+ } while (0)
+
/* expects a LL, but typed parameter must be ULL */
#define QEMU_ADD_BLOCK_PARAM_LL(record, maxparams, num, name, value) \
do { \
@@ -18539,6 +18554,7 @@ qemuDomainGetStatsOneBlock(virQEMUDriverPtr driver,
virDomainDiskDefPtr disk,
virStorageSourcePtr src,
size_t block_idx,
+ unsigned int backing_idx,
bool abbreviated,
virHashTablePtr stats)
{
@@ -18550,8 +18566,16 @@ qemuDomainGetStatsOneBlock(virQEMUDriverPtr driver,
if (virStorageSourceIsLocalStorage(src) && src->path)
QEMU_ADD_NAME_PARAM(record, maxparams, "block", "path",
block_idx, src->path);
+ if (backing_idx)
+ QEMU_ADD_BLOCK_PARAM_UI(record, maxparams, block_idx, "backingIndex",
+ backing_idx);
- if (abbreviated || !disk->info.alias ||
+ /* FIXME: qemu gives information on backing files, but we aren't
+ * currently storing it into the stats table - we need a common
+ * key in qemu_monitor_json.c:qemuMonitorGetAllBlockStatsInfo and
+ * here for getting at that information, probably something like
+ * asprintf("%s.%d", alias, backing_idx). */
+ if (abbreviated || backing_idx || !disk->info.alias ||
!(entry = virHashLookup(stats, disk->info.alias))) {
if (qemuStorageLimitsRefresh(driver, cfg, dom,
disk, src, NULL, NULL) < 0)
@@ -18617,6 +18641,7 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
bool abbreviated = false;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int count_index = -1;
+ size_t visited = 0;
if (!HAVE_JOB(privflags) || !virDomainObjIsActive(dom)) {
abbreviated = true; /* it's ok, just go ahead silently */
@@ -18640,18 +18665,26 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
for (i = 0; i < dom->def->ndisks; i++) {
virDomainDiskDefPtr disk = dom->def->disks[i];
+ virStorageSourcePtr src = disk->src;
+ unsigned int backing_idx = 0;
- if (qemuDomainGetStatsOneBlock(driver, cfg, dom, record, maxparams,
- disk, disk->src, i, abbreviated,
- stats) < 0)
- goto cleanup;
+ while (src && (!backing_idx ||
+ (privflags & QEMU_DOMAIN_STATS_BACKING))) {
+ if (qemuDomainGetStatsOneBlock(driver, cfg, dom, record, maxparams,
+ disk, src, visited, backing_idx,
+ abbreviated, stats) < 0)
+ goto cleanup;
+ visited++;
+ backing_idx++;
+ src = src->backingStore;
+ }
}
ret = 0;
cleanup:
if (count_index >= 0)
- record->params[count_index].value.ui = i;
+ record->params[count_index].value.ui = visited;
virHashFree(stats);
virObjectUnref(cfg);
return ret;
@@ -18792,11 +18825,13 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
unsigned int domflags = 0;
if (ndoms)
- virCheckFlags(VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
+ virCheckFlags(VIR_CONNECT_GET_ALL_DOMAINS_STATS_BACKING |
+ VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
else
virCheckFlags(VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE |
VIR_CONNECT_LIST_DOMAINS_FILTERS_PERSISTENT |
VIR_CONNECT_LIST_DOMAINS_FILTERS_STATE |
+ VIR_CONNECT_GET_ALL_DOMAINS_STATS_BACKING |
VIR_CONNECT_GET_ALL_DOMAINS_STATS_ENFORCE_STATS, -1);
if (virConnectGetAllDomainStatsEnsureACL(conn) < 0)
@@ -18826,6 +18861,8 @@ qemuConnectGetAllDomainStats(virConnectPtr conn,
if (qemuDomainGetStatsNeedMonitor(stats))
privflags |= QEMU_DOMAIN_STATS_HAVE_JOB;
+ if (flags & VIR_CONNECT_GET_ALL_DOMAINS_STATS_BACKING)
+ privflags |= QEMU_DOMAIN_STATS_BACKING;
for (i = 0; i < ndoms; i++) {
domflags = privflags;
--
1.9.3