[libvirt] [PATCH 0/6] Follow-up patches for IOThread API/display

Based on jtomko's review of the IOThread series and my foray into the
libvirt-perl bindings, these patches make the following adjustments:

Patch 1 - Found while generating the virsh iothreadpin description; the
          vcpupin description had a few missing words
Patch 2 - While working on the libvirt-perl bindings, I found I was a bit
          overaggressive with my GetIOThreadsInfo interface with regard to
          checking ReadOnly unnecessarily
Patch 3 - Adjust the IOThread CPU Affinity algorithm based on jtomko's
          review comments
Patch 4 - Fallout from running the patches through my Coverity checker
Patch 5 - Similar to IOThread - adjust the GetVcpuInfo CPU Affinity
          algorithm for the returned cpumap
Patch 6 - Similar to IOThread - adjust the GetEmulatorInfo CPU Affinity
          algorithm for the returned cpumap

John Ferlan (6):
  Fix syntax for vcpupin description
  Remove ReadOnly check for GetIOThreadsInfo
  qemu: Change/Fix IOThread CPU affinity bitmap manipulation
  qemu: Resolve Coverity CHECKED_RETURN issue
  qemu: Change qemuDomainGetVcpuPinInfo bitmap manipulation
  qemu: Change qemuDomainGetEmulatorPinInfo bitmap manipulation

 src/libvirt-domain.c   |   1 -
 src/qemu/qemu_driver.c | 177 +++++++++++++++++++++----------------------------
 tools/virsh.pod        |   4 +-
 3 files changed, 76 insertions(+), 106 deletions(-)

--
2.1.0

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 tools/virsh.pod | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tools/virsh.pod b/tools/virsh.pod
index f19601e..4670554 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -2284,8 +2284,8 @@ I<vcpu> or omit I<vcpu> to list all at once.
 I<cpulist> is a list of physical CPU numbers. Its syntax is a comma
 separated list and a special markup using '-' and '^' (ex. '0-4', '0-3,^2')
 can also be allowed. The '-' denotes the range and the '^' denotes exclusive.
-If you want to reset vcpupin setting, that is, to pin vcpu all physical cpus,
-simply specify 'r' as a cpulist.
+If you want to reset vcpupin setting, that is, to pin the I<vcpu> to all
+physical cpus, simply specify 'r' as a I<cpulist>.
 If I<--live> is specified, affect a running guest.
 If I<--config> is specified, affect the next boot of a persistent guest.
 If I<--current> is specified, affect the current guest state.
--
2.1.0
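For illustration, the markup described above works like this on the command
line (the guest name "demo" is hypothetical, purely to show the documented
syntax):

  # pin vcpu 1 of "demo" to physical CPUs 0-3, excluding CPU 2
  virsh vcpupin demo 1 0-3,^2

  # reset the pinning, i.e. allow vcpu 1 to run on all physical CPUs
  virsh vcpupin demo 1 r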

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/libvirt-domain.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
index c43c8f6..04545fd 100644
--- a/src/libvirt-domain.c
+++ b/src/libvirt-domain.c
@@ -7919,7 +7919,6 @@ virDomainGetIOThreadsInfo(virDomainPtr dom,
     virResetLastError();
 
     virCheckDomainReturn(dom, -1);
-    virCheckReadOnlyGoto(dom->conn->flags, error);
     virCheckNonNullArgGoto(info, error);
 
     *info = NULL;
--
2.1.0
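For context on why the check was overaggressive: fetching IOThread info is a
pure query, so it should work over a connection opened with
virConnectOpenReadOnly. A minimal client sketch, assuming the API signature
as posted in this series and a hypothetical domain name "demo" (most error
handling elided):

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    virDomainPtr dom = virDomainLookupByName(conn, "demo");
    virDomainIOThreadInfoPtr *info = NULL;
    int niothreads;
    size_t i;

    /* With the ReadOnly check removed, this query succeeds on a
     * read-only connection instead of being denied. */
    if ((niothreads = virDomainGetIOThreadsInfo(dom, &info, 0)) < 0)
        return 1;

    for (i = 0; i < niothreads; i++)
        printf("IOThread %u\n", info[i]->iothread_id);

    for (i = 0; i < niothreads; i++)
        virDomainIOThreadsInfoFree(info[i]);
    free(info);
    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}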

Based on review:
http://www.redhat.com/archives/libvir-list/2015-March/msg00294.html

Adjust how the returned cpumap and cpumaplen are generated.

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/qemu/qemu_driver.c | 79 +++++++++++++++++---------------------------------
 1 file changed, 27 insertions(+), 52 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b37474f..207c62c 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5566,7 +5566,7 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
     qemuMonitorIOThreadsInfoPtr *iothreads = NULL;
     virDomainIOThreadInfoPtr *info_ret = NULL;
     int niothreads = 0;
-    int maxcpu, hostcpus, maplen;
+    int hostcpus;
     size_t i;
     int ret = -1;
@@ -5602,18 +5602,11 @@
     if ((hostcpus = nodeGetCPUCount()) < 0)
         goto endjob;
 
-    maplen = VIR_CPU_MAPLEN(hostcpus);
-    maxcpu = maplen * 8;
-    if (maxcpu > hostcpus)
-        maxcpu = hostcpus;
-
     if (VIR_ALLOC_N(info_ret, niothreads) < 0)
         goto endjob;
 
     for (i = 0; i < niothreads; i++) {
         virBitmapPtr map = NULL;
-        unsigned char *tmpmap = NULL;
-        int tmpmaplen = 0;
 
         if (VIR_ALLOC(info_ret[i]) < 0)
             goto endjob;
@@ -5622,19 +5615,14 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
                                          &info_ret[i]->iothread_id) < 0)
             goto endjob;
 
-        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
+        if (virProcessGetAffinity(iothreads[i]->thread_id, &map, hostcpus) < 0)
             goto endjob;
 
-        if (virProcessGetAffinity(iothreads[i]->thread_id, &map, maxcpu) < 0)
+        if (virBitmapToData(map, &info_ret[i]->cpumap,
+                            &info_ret[i]->cpumaplen) < 0) {
+            virBitmapFree(map);
             goto endjob;
-
-        virBitmapToData(map, &tmpmap, &tmpmaplen);
-        if (tmpmaplen > maplen)
-            tmpmaplen = maplen;
-        memcpy(info_ret[i]->cpumap, tmpmap, tmpmaplen);
-        info_ret[i]->cpumaplen = tmpmaplen;
-
-        VIR_FREE(tmpmap);
+        }
 
         virBitmapFree(map);
     }
@@ -5665,12 +5653,8 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
                              virDomainIOThreadInfoPtr **info)
 {
     virDomainIOThreadInfoPtr *info_ret = NULL;
-    virDomainVcpuPinDefPtr *iothreadspin_list;
-    virBitmapPtr cpumask = NULL;
-    unsigned char *cpumap;
-    int maxcpu, hostcpus, maplen;
-    size_t i, pcpu;
-    bool pinned;
+    int hostcpus;
+    size_t i;
     int ret = -1;
 
     if (targetDef->iothreads == 0)
@@ -5679,47 +5663,38 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
     if ((hostcpus = nodeGetCPUCount()) < 0)
         goto cleanup;
 
-    maplen = VIR_CPU_MAPLEN(hostcpus);
-    maxcpu = maplen * 8;
-    if (maxcpu > hostcpus)
-        maxcpu = hostcpus;
-
     if (VIR_ALLOC_N(info_ret, targetDef->iothreads) < 0)
         goto cleanup;
 
     for (i = 0; i < targetDef->iothreads; i++) {
+        virDomainVcpuPinDefPtr pininfo;
+        virBitmapPtr bitmap = NULL;
+
         if (VIR_ALLOC(info_ret[i]) < 0)
             goto cleanup;
 
         /* IOThreads being counting at 1 */
         info_ret[i]->iothread_id = i + 1;
 
-        if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
-            goto cleanup;
-
-        /* Initialize the cpumap */
-        info_ret[i]->cpumaplen = maplen;
-        memset(info_ret[i]->cpumap, 0xff, maplen);
-        if (maxcpu % 8)
-            info_ret[i]->cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
-    }
-
-    /* If iothreadspin setting exists, there are unused physical cpus */
-    iothreadspin_list = targetDef->cputune.iothreadspin;
-    for (i = 0; i < targetDef->cputune.niothreadspin; i++) {
-        /* vcpuid is the iothread_id...
-         * iothread_id is the index into info_ret + 1, so we can
-         * assume that the info_ret index we want is vcpuid - 1
-         */
-        cpumap = info_ret[iothreadspin_list[i]->vcpuid - 1]->cpumap;
-        cpumask = iothreadspin_list[i]->cpumask;
-
-        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
-            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+        pininfo = virDomainVcpuPinFindByVcpu(targetDef->cputune.iothreadspin,
+                                             targetDef->cputune.niothreadspin,
+                                             i + 1);
+        if (!pininfo) {
+            if (!(bitmap = virBitmapNew(hostcpus)))
                 goto cleanup;
-            if (!pinned)
-                VIR_UNUSE_CPU(cpumap, pcpu);
+            virBitmapSetAll(bitmap);
+        } else {
+            bitmap = pininfo->cpumask;
+        }
+        if (virBitmapToData(bitmap, &info_ret[i]->cpumap,
+                            &info_ret[i]->cpumaplen) < 0) {
+            if (!pininfo)
+                virBitmapFree(bitmap);
+            goto cleanup;
         }
+        if (!pininfo)
+            virBitmapFree(bitmap);
     }
 
     *info = info_ret;
--
2.1.0
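Stripped of driver boilerplate, the pattern this patch adopts is: build (or
borrow) a virBitmap, then let virBitmapToData allocate the returned byte
array and fill in its length. A standalone sketch of that pattern using the
internal virbitmap helpers (not the exact driver code; the function name is
made up):

#include "virbitmap.h"

/* Sketch: produce the cpumap/cpumaplen pair for an unpinned thread,
 * i.e. a map with every host CPU set. */
static int
exampleDefaultCpumap(int hostcpus,
                     unsigned char **cpumap,
                     int *cpumaplen)
{
    virBitmapPtr bitmap;

    if (!(bitmap = virBitmapNew(hostcpus)))
        return -1;
    virBitmapSetAll(bitmap);

    /* virBitmapToData allocates *cpumap and sets *cpumaplen,
     * replacing the old manual maplen/maxcpu arithmetic. */
    if (virBitmapToData(bitmap, cpumap, cpumaplen) < 0) {
        virBitmapFree(bitmap);
        return -1;
    }

    virBitmapFree(bitmap);
    return 0;
}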

After adding a checked call to virBitmapToData in the IOThreads code, my
Coverity checker lets me know qemuDomainHelperGetVcpus also needs to check
the status...

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/qemu/qemu_driver.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 207c62c..c1f4e95 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1420,7 +1420,10 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
             if (virProcessGetAffinity(priv->vcpupids[v], &map, maxcpu) < 0)
                 return -1;
 
-            virBitmapToData(map, &tmpmap, &tmpmapLen);
+            if (virBitmapToData(map, &tmpmap, &tmpmapLen) < 0) {
+                virBitmapFree(map);
+                return -1;
+            }
             if (tmpmapLen > maplen)
                 tmpmapLen = maplen;
             memcpy(cpumap, tmpmap, tmpmapLen);
--
2.1.0

Follow-up to the IOThread review on CPU affinity map manipulation:
http://www.redhat.com/archives/libvir-list/2015-March/msg00294.html
indicates that the GetVcpuPinInfo could use similar algorithm adjustments,
which is what this patch does.

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/qemu/qemu_driver.c | 59 ++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 31 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index c1f4e95..85387a8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5100,12 +5100,8 @@ qemuDomainGetVcpuPinInfo(virDomainPtr dom,
     virDomainObjPtr vm = NULL;
     virDomainDefPtr targetDef = NULL;
     int ret = -1;
-    int maxcpu, hostcpus, vcpu, pcpu;
-    int n;
-    virDomainVcpuPinDefPtr *vcpupin_list;
-    virBitmapPtr cpumask = NULL;
+    int hostcpus, vcpu;
     unsigned char *cpumap;
-    bool pinned;
     virCapsPtr caps = NULL;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
@@ -5133,39 +5129,40 @@ qemuDomainGetVcpuPinInfo(virDomainPtr dom,
     if ((hostcpus = nodeGetCPUCount()) < 0)
         goto cleanup;
 
-    maxcpu = maplen * 8;
-    if (maxcpu > hostcpus)
-        maxcpu = hostcpus;
-
-    /* Clamp to actual number of vcpus */
-    if (ncpumaps > targetDef->vcpus)
-        ncpumaps = targetDef->vcpus;
-
     if (ncpumaps < 1)
         goto cleanup;
 
-    /* initialize cpumaps */
-    memset(cpumaps, 0xff, maplen * ncpumaps);
-    if (maxcpu % 8) {
-        for (vcpu = 0; vcpu < ncpumaps; vcpu++) {
-            cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
-            cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
-        }
-    }
+    for (vcpu = 0; vcpu < ncpumaps; vcpu++) {
+        virDomainVcpuPinDefPtr pininfo;
+        virBitmapPtr bitmap = NULL;
+        unsigned char *tmpmap = NULL;
+        int tmpmaplen;
 
-    /* if vcpupin setting exists, there are unused physical cpus */
-    for (n = 0; n < targetDef->cputune.nvcpupin; n++) {
-        vcpupin_list = targetDef->cputune.vcpupin;
-        vcpu = vcpupin_list[n]->vcpuid;
-        cpumask = vcpupin_list[n]->cpumask;
-        cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
-        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
-            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+        pininfo = virDomainVcpuPinFindByVcpu(targetDef->cputune.vcpupin,
+                                             targetDef->cputune.nvcpupin,
+                                             vcpu);
+        if (!pininfo) {
+            if (!(bitmap = virBitmapNew(hostcpus)))
                 goto cleanup;
-            if (!pinned)
-                VIR_UNUSE_CPU(cpumap, pcpu);
+            virBitmapSetAll(bitmap);
+        } else {
+            bitmap = pininfo->cpumask;
+        }
+
+        if (virBitmapToData(bitmap, &tmpmap, &tmpmaplen) < 0) {
+            if (!pininfo)
+                virBitmapFree(bitmap);
+            goto cleanup;
         }
+        if (tmpmaplen > maplen)
+            tmpmaplen = maplen;
+        cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
+        memcpy(cpumap, tmpmap, tmpmaplen);
+        if (!pininfo)
+            virBitmapFree(bitmap);
+        VIR_FREE(tmpmap);
     }
+
     ret = ncpumaps;
 
 cleanup:
--
2.1.0

On Fri, Mar 06, 2015 at 01:47:11PM -0500, John Ferlan wrote:
Follow-up to the IOThread review on CPU affinity map manipulation:
http://www.redhat.com/archives/libvir-list/2015-March/msg00294.html
indicates that the GetVcpuPinInfo could use similar algorithm adjustments, which is what this patch does.
Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/qemu/qemu_driver.c | 59 ++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 31 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index c1f4e95..85387a8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5100,12 +5100,8 @@ qemuDomainGetVcpuPinInfo(virDomainPtr dom,
     virDomainObjPtr vm = NULL;
     virDomainDefPtr targetDef = NULL;
     int ret = -1;
-    int maxcpu, hostcpus, vcpu, pcpu;
-    int n;
-    virDomainVcpuPinDefPtr *vcpupin_list;
-    virBitmapPtr cpumask = NULL;
+    int hostcpus, vcpu;
     unsigned char *cpumap;
-    bool pinned;
     virCapsPtr caps = NULL;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
@@ -5133,39 +5129,40 @@ qemuDomainGetVcpuPinInfo(virDomainPtr dom,
     if ((hostcpus = nodeGetCPUCount()) < 0)
         goto cleanup;
 
-    maxcpu = maplen * 8;
-    if (maxcpu > hostcpus)
-        maxcpu = hostcpus;
-
-    /* Clamp to actual number of vcpus */
-    if (ncpumaps > targetDef->vcpus)
-        ncpumaps = targetDef->vcpus;
-
This check can stay - we don't need to spend time returning a full bitmap for vcpus that don't have pin info, because they don't exist.

Jan

Follow-up to the IOThread review on CPU affinity map manipulation:
http://www.redhat.com/archives/libvir-list/2015-March/msg00294.html
indicates that the GetEmulatorPinInfo could use similar algorithm
adjustments, which is what this patch does.

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/qemu/qemu_driver.c | 34 +++++++++++++++-------------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 85387a8..b95f5ac 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5364,10 +5364,12 @@ qemuDomainGetEmulatorPinInfo(virDomainPtr dom,
     virDomainObjPtr vm = NULL;
     virDomainDefPtr targetDef = NULL;
     int ret = -1;
-    int maxcpu, hostcpus, pcpu;
+    int hostcpus;
     virBitmapPtr cpumask = NULL;
-    bool pinned;
+    virBitmapPtr bitmap = NULL;
     virCapsPtr caps = NULL;
+    unsigned char *tmpmap = NULL;
+    int tmpmaplen;
 
     virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                   VIR_DOMAIN_AFFECT_CONFIG, -1);
@@ -5394,36 +5396,30 @@ qemuDomainGetEmulatorPinInfo(virDomainPtr dom,
     if ((hostcpus = nodeGetCPUCount()) < 0)
         goto cleanup;
 
-    maxcpu = maplen * 8;
-    if (maxcpu > hostcpus)
-        maxcpu = hostcpus;
-
-    /* initialize cpumaps */
-    memset(cpumaps, 0xff, maplen);
-    if (maxcpu % 8)
-        cpumaps[maplen - 1] &= (1 << maxcpu % 8) - 1;
-
     if (targetDef->cputune.emulatorpin) {
         cpumask = targetDef->cputune.emulatorpin->cpumask;
     } else if (targetDef->cpumask) {
         cpumask = targetDef->cpumask;
     } else {
-        ret = 0;
-        goto cleanup;
-    }
-
-    for (pcpu = 0; pcpu < maxcpu; pcpu++) {
-        if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+        if (!(bitmap = virBitmapNew(hostcpus)))
             goto cleanup;
-        if (!pinned)
-            VIR_UNUSE_CPU(cpumaps, pcpu);
+        virBitmapSetAll(bitmap);
+        cpumask = bitmap;
     }
 
+    if (virBitmapToData(cpumask, &tmpmap, &tmpmaplen) < 0)
+        goto cleanup;
+    if (tmpmaplen > maplen)
+        tmpmaplen = maplen;
+    memcpy(cpumaps, tmpmap, tmpmaplen);
+    VIR_FREE(tmpmap);
+
     ret = 1;
 
 cleanup:
     qemuDomObjEndAPI(&vm);
     virObjectUnref(caps);
+    virBitmapFree(bitmap);
     return ret;
 }
--
2.1.0

On Fri, Mar 06, 2015 at 01:47:06PM -0500, John Ferlan wrote:
Based on jtomko's review of the IOThread series and my foray into the libvirt-perl bindings, these patches make the following adjustments:
Patch 1 - Found while generating the virsh iothreadpin description; the
          vcpupin description had a few missing words
Patch 2 - While working on the libvirt-perl bindings, I found I was a bit
          overaggressive with my GetIOThreadsInfo interface with regard to
          checking ReadOnly unnecessarily
Patch 3 - Adjust the IOThread CPU Affinity algorithm based on jtomko's
          review comments
Patch 4 - Fallout from running the patches through my Coverity checker
Patch 5 - Similar to IOThread - adjust the GetVcpuInfo CPU Affinity
          algorithm for the returned cpumap
Patch 6 - Similar to IOThread - adjust the GetEmulatorInfo CPU Affinity
          algorithm for the returned cpumap

John Ferlan (6):
  Fix syntax for vcpupin description
  Remove ReadOnly check for GetIOThreadsInfo
  qemu: Change/Fix IOThread CPU affinity bitmap manipulation
  qemu: Resolve Coverity CHECKED_RETURN issue
  qemu: Change qemuDomainGetVcpuPinInfo bitmap manipulation
  qemu: Change qemuDomainGetEmulatorPinInfo bitmap manipulation

 src/libvirt-domain.c   |   1 -
 src/qemu/qemu_driver.c | 177 +++++++++++++++++++++----------------------------
 tools/virsh.pod        |   4 +-
 3 files changed, 76 insertions(+), 106 deletions(-)
ACK series, thanks for touching up VcpuInfo and EmulatorInfo as well!

There's one bug that I noticed: If the CPUs are pinned domain-wide, that is:

  <vcpu placement='static' cpuset='0-1'>4</vcpu>
  <iothreads>2</iothreads>

Both vcpu threads and iothreads will inherit this pinning. For a shutoff
domain, vcpupininfo will display 0-1 for all vcpus, but iothreadsinfo shows
0-4, even though they will get pinned to 0-1 after domain startup.

Turns out the vcpupin info is filled in for all the vcpus when the XML is
parsed, since commit 10f8a45deb0f057a70a0d49794d3a3d19d17dceb. Falling back
to targetDef->cpumask in qemuDomainGetIOThreadsConfig (as
qemuDomainGetEmulatorPinInfo does) would solve that too.

Jan

On 03/08/2015 07:03 AM, Ján Tomko wrote:
On Fri, Mar 06, 2015 at 01:47:06PM -0500, John Ferlan wrote:
Based on jtomko's review of the IOThread series and my foray into the libvirt-perl bindings, these patches make the following adjustments:
Patch 1 - Found while generating the virsh iothreadpin description; the
          vcpupin description had a few missing words
Patch 2 - While working on the libvirt-perl bindings, I found I was a bit
          overaggressive with my GetIOThreadsInfo interface with regard to
          checking ReadOnly unnecessarily
Patch 3 - Adjust the IOThread CPU Affinity algorithm based on jtomko's
          review comments
Patch 4 - Fallout from running the patches through my Coverity checker
Patch 5 - Similar to IOThread - adjust the GetVcpuInfo CPU Affinity
          algorithm for the returned cpumap
Patch 6 - Similar to IOThread - adjust the GetEmulatorInfo CPU Affinity
          algorithm for the returned cpumap

John Ferlan (6):
  Fix syntax for vcpupin description
  Remove ReadOnly check for GetIOThreadsInfo
  qemu: Change/Fix IOThread CPU affinity bitmap manipulation
  qemu: Resolve Coverity CHECKED_RETURN issue
  qemu: Change qemuDomainGetVcpuPinInfo bitmap manipulation
  qemu: Change qemuDomainGetEmulatorPinInfo bitmap manipulation

 src/libvirt-domain.c   |   1 -
 src/qemu/qemu_driver.c | 177 +++++++++++++++++++++----------------------------
 tools/virsh.pod        |   4 +-
 3 files changed, 76 insertions(+), 106 deletions(-)
ACK series, thanks for touching up VcpuInfo and EmulatorInfo as well!
There's one bug that I noticed: If the CPUs are pinned domain-wide, that is:

  <vcpu placement='static' cpuset='0-1'>4</vcpu>
  <iothreads>2</iothreads>
Both vcpu threads and iothreads will inherit this pinning.
For a shutoff domain, vcpupininfo will display 0-1 for all vcpus, but iothreadsinfo shows 0-4, even though they will get pinned to 0-1 after domain startup.
Turns out the vcpupin info is filled in for all the vcpus when the XML is parsed, since commit 10f8a45deb0f057a70a0d49794d3a3d19d17dceb.
Falling back to targetDef->cpumask in qemuDomainGetIOThreadsConfig (as qemuDomainGetEmulatorPinInfo does) would solve that too.
Squashed the following into patch 4 to address this...

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2a9db1b..ceceafa 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5653,6 +5653,8 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
                              virDomainIOThreadInfoPtr **info)
 {
     virDomainIOThreadInfoPtr *info_ret = NULL;
+    virBitmapPtr bitmap = NULL;
+    virBitmapPtr cpumask = NULL;
     int hostcpus;
     size_t i;
     int ret = -1;
@@ -5668,7 +5670,6 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
 
     for (i = 0; i < targetDef->iothreads; i++) {
         virDomainVcpuPinDefPtr pininfo;
-        virBitmapPtr bitmap = NULL;
 
         if (VIR_ALLOC(info_ret[i]) < 0)
             goto cleanup;
@@ -5681,20 +5682,22 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
                                              targetDef->cputune.niothreadspin,
                                              i + 1);
         if (!pininfo) {
-            if (!(bitmap = virBitmapNew(hostcpus)))
-                goto cleanup;
-            virBitmapSetAll(bitmap);
+            if (targetDef->cpumask) {
+                cpumask = targetDef->cpumask;
+            } else {
+                if (!(bitmap = virBitmapNew(hostcpus)))
+                    goto cleanup;
+                virBitmapSetAll(bitmap);
+                cpumask = bitmap;
+            }
         } else {
-            bitmap = pininfo->cpumask;
+            cpumask = pininfo->cpumask;
         }
-        if (virBitmapToData(bitmap, &info_ret[i]->cpumap,
-                            &info_ret[i]->cpumaplen) < 0) {
-            if (!pininfo)
-                virBitmapFree(bitmap);
+        if (virBitmapToData(cpumask, &info_ret[i]->cpumap,
+                            &info_ret[i]->cpumaplen) < 0)
             goto cleanup;
-        }
-        if (!pininfo)
-            virBitmapFree(bitmap);
+        virBitmapFree(bitmap);
+        bitmap = NULL;
     }
 
     *info = info_ret;
@@ -5707,6 +5710,7 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
             virDomainIOThreadsInfoFree(info_ret[i]);
         VIR_FREE(info_ret);
     }
+    virBitmapFree(bitmap);
 
     return ret;
 }

Replaced the overly aggressively removed lines from patch 5...

I will send a separate patch for the domain_conf.c change that addresses
any needs for iothreadspin setup to use the def->cpumask... and pushed.

Tks -

John

On Mon, Mar 09, 2015 at 08:27:10AM -0400, John Ferlan wrote:
On 03/08/2015 07:03 AM, Ján Tomko wrote:
ACK series, thanks for touching up VcpuInfo and EmulatorInfo as well!
There's one bug that I noticed: If the CPUs are pinned domain-wide, that is:

  <vcpu placement='static' cpuset='0-1'>4</vcpu>
  <iothreads>2</iothreads>
Both vcpu threads and iothreads will inherit this pinning.
For a shutoff domain, vcpupininfo will display 0-1 for all vcpus, but iothreadsinfo shows 0-4, even though they will get pinned to 0-1 after domain startup.
Turns out the vcpupin info is filled in for all the vcpus when the XML is parsed, since commit 10f8a45deb0f057a70a0d49794d3a3d19d17dceb.
Falling back to targetDef->cpumask in qemuDomainGetIOThreadsConfig (as qemuDomainGetEmulatorPinInfo does) would solve that too.
Squashed the following into patch 4 to address this...
I don't think this functional change belongs in the patch refactoring the function to use the virBitmap* APIs. But since the functionality has not yet been released, nobody should feel the need to backport any of these commits, so mixing them up is not that big a deal.
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2a9db1b..ceceafa 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5653,6 +5653,8 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
                              virDomainIOThreadInfoPtr **info)
 {
     virDomainIOThreadInfoPtr *info_ret = NULL;
+    virBitmapPtr bitmap = NULL;
+    virBitmapPtr cpumask = NULL;
     int hostcpus;
     size_t i;
     int ret = -1;
@@ -5668,7 +5670,6 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
 
     for (i = 0; i < targetDef->iothreads; i++) {
         virDomainVcpuPinDefPtr pininfo;
-        virBitmapPtr bitmap = NULL;
 
         if (VIR_ALLOC(info_ret[i]) < 0)
             goto cleanup;
@@ -5681,20 +5682,22 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
                                              targetDef->cputune.niothreadspin,
                                              i + 1);
         if (!pininfo) {
-            if (!(bitmap = virBitmapNew(hostcpus)))
-                goto cleanup;
-            virBitmapSetAll(bitmap);
+            if (targetDef->cpumask) {
+                cpumask = targetDef->cpumask;
+            } else {
+                if (!(bitmap = virBitmapNew(hostcpus)))
+                    goto cleanup;
+                virBitmapSetAll(bitmap);
+                cpumask = bitmap;
+            }
         } else {
-            bitmap = pininfo->cpumask;
+            cpumask = pininfo->cpumask;
         }
-        if (virBitmapToData(bitmap, &info_ret[i]->cpumap,
-                            &info_ret[i]->cpumaplen) < 0) {
-            if (!pininfo)
-                virBitmapFree(bitmap);
+        if (virBitmapToData(cpumask, &info_ret[i]->cpumap,
+                            &info_ret[i]->cpumaplen) < 0)
             goto cleanup;
-        }
-        if (!pininfo)
-            virBitmapFree(bitmap);
+        virBitmapFree(bitmap);
+        bitmap = NULL;
     }
 
     *info = info_ret;
@@ -5707,6 +5710,7 @@ qemuDomainGetIOThreadsConfig(virDomainDefPtr targetDef,
             virDomainIOThreadsInfoFree(info_ret[i]);
         VIR_FREE(info_ret);
     }
+    virBitmapFree(bitmap);
 
     return ret;
 }
Replaced the overly aggressively removed lines from patch 5...
I will send a separate patch for the domain_conf.c change that addresses any needs for iothreadspin setup to use the def->cpumask.
However, the squashed-in patch could've benefited from going through a standard review process. If you plan to copy the vcpupin code in domain_conf.c to do the same with iothreadspin, the squashed-in change is redundant.

Jan