[libvirt] [PATCH v2 0/3] libxl: improve vcpu pinning

Hi,

Take two, with the comments from Jim on v1 addressed. Basically, that means I've added a few comments and removed some unnecessary safety checks.

Series available here:

  git://xenbits.xen.org/people/dariof/libvirt.git libxl/VcpuPinX-v2

Regards,
Dario
---

Dario Faggioli (3):
      libxl: implement virDomainGetVcpuPinInfo
      libxl: implement virDomainPinVcpuFlags
      libxl: correctly handle affinity reset in virDomainPinVcpu[Flags]

 src/libxl/libxl_driver.c | 166 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 145 insertions(+), 21 deletions(-)

--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)

[libvirt] [PATCH v2 1/3] libxl: implement virDomainGetVcpuPinInfo

So that it is possible to query vcpu related information of a persistent
but not running domain, like it is for the QEMU driver.

In fact, before this patch, we have:

 # virsh list --all
  Id    Name                           State
 ----------------------------------------------------
  5     debian_32                      running
  -     fedora20_64                    shut off

 # virsh vcpuinfo fedora20_64
 error: this function is not supported by the connection driver: virDomainGetVcpuPinInfo

After (same situation as above, i.e., fedora20_64 not running):

 # virsh vcpuinfo fedora20_64
 VCPU:           0
 CPU:            N/A
 State:          N/A
 CPU time        N/A
 CPU Affinity:   yyyyyyyy

 VCPU:           1
 CPU:            N/A
 State:          N/A
 CPU time        N/A
 CPU Affinity:   yyyyyyyy

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Cc: Jim Fehlig <jfehlig@suse.com>
Cc: Ian Jackson <Ian.Jackson@eu.citrix.com>
---
 src/libxl/libxl_driver.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
index 29aa6c7..3855509 100644
--- a/src/libxl/libxl_driver.c
+++ b/src/libxl/libxl_driver.c
@@ -2417,6 +2417,83 @@ cleanup:
     return ret;
 }
 
+static int
+libxlDomainGetVcpuPinInfo(virDomainPtr dom, int ncpumaps,
+                          unsigned char *cpumaps, int maplen,
+                          unsigned int flags)
+{
+    libxlDriverPrivatePtr driver = dom->conn->privateData;
+    libxlDriverConfigPtr cfg = libxlDriverConfigGet(driver);
+    virDomainObjPtr vm = NULL;
+    virDomainDefPtr targetDef = NULL;
+    virDomainVcpuPinDefPtr *vcpupin_list;
+    virBitmapPtr cpumask = NULL;
+    int maxcpu, hostcpus, vcpu, pcpu, n, ret = -1;
+    unsigned char *cpumap;
+    bool pinned;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+    if (!(vm = libxlDomObjFromDomain(dom)))
+        goto cleanup;
+
+    if (virDomainGetVcpuPinInfoEnsureACL(dom->conn, vm->def) < 0)
+        goto cleanup;
+
+    if (virDomainLiveConfigHelperMethod(cfg->caps, driver->xmlopt, vm,
+                                        &flags, &targetDef) < 0)
+        goto cleanup;
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        targetDef = vm->def;
+    }
+
+    /* Make sure coverity knows targetDef is valid at this point. */
+    sa_assert(targetDef);
+
+    /* Clamp to actual number of vcpus */
+    if (ncpumaps > targetDef->vcpus)
+        ncpumaps = targetDef->vcpus;
+
+    /* we use cfg->ctx, as vm->privateData->ctx may be NULL if VM is down. */
+    if ((hostcpus = libxl_get_max_cpus(cfg->ctx)) < 0)
+        goto cleanup;
+
+    maxcpu = maplen * 8;
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    /* initialize cpumaps */
+    memset(cpumaps, 0xff, maplen * ncpumaps);
+    if (maxcpu % 8) {
+        for (vcpu = 0; vcpu < ncpumaps; vcpu++) {
+            cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
+            cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
+        }
+    }
+
+    /* if vcpupin setting exists, there may be unused pcpus */
+    for (n = 0; n < targetDef->cputune.nvcpupin; n++) {
+        vcpupin_list = targetDef->cputune.vcpupin;
+        vcpu = vcpupin_list[n]->vcpuid;
+        cpumask = vcpupin_list[n]->cpumask;
+        cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
+        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
+            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+                goto cleanup;
+            if (!pinned)
+                VIR_UNUSE_CPU(cpumap, pcpu);
+        }
+    }
+    ret = ncpumaps;
+
+cleanup:
+    if (vm)
+        virObjectUnlock(vm);
+    virObjectUnref(cfg);
+    return ret;
+}
+
 static int
 libxlDomainGetVcpus(virDomainPtr dom, virVcpuInfoPtr info, int maxinfo,
@@ -4244,6 +4321,7 @@ static virDriver libxlDriver = {
     .domainGetVcpusFlags = libxlDomainGetVcpusFlags, /* 0.9.0 */
     .domainPinVcpu = libxlDomainPinVcpu, /* 0.9.0 */
     .domainGetVcpus = libxlDomainGetVcpus, /* 0.9.0 */
+    .domainGetVcpuPinInfo = libxlDomainGetVcpuPinInfo, /* 1.2.1 */
     .domainGetXMLDesc = libxlDomainGetXMLDesc, /* 0.9.0 */
     .connectDomainXMLFromNative = libxlConnectDomainXMLFromNative, /* 0.9.0 */
     .connectDomainXMLToNative = libxlConnectDomainXMLToNative, /* 0.9.0 */
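[Editor's illustration, not part of the patch] For readers who want to exercise the new entry point from the client side, here is a minimal sketch that queries the persistent pinning of a shut off domain through the public virDomainGetVcpuPinInfo() API; the connection URI "xen:///system" and the domain name "fedora20_64" are assumptions borrowed from the virsh examples above.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("xen:///system");
    virDomainPtr dom = NULL;
    virNodeInfo nodeinfo;
    virDomainInfo dominfo;
    unsigned char *cpumaps = NULL;
    int hostcpus, maplen, ncpus, v, c;

    if (!conn)
        return EXIT_FAILURE;
    if (!(dom = virDomainLookupByName(conn, "fedora20_64")))
        goto err;
    if (virNodeGetInfo(conn, &nodeinfo) < 0 ||
        virDomainGetInfo(dom, &dominfo) < 0)
        goto err;

    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
    maplen = VIR_CPU_MAPLEN(hostcpus);
    ncpus = dominfo.nrVirtCpu;
    if (!(cpumaps = calloc(ncpus, maplen)))
        goto err;

    /* --config style query: with this patch it also works while the
     * domain is shut off */
    if (virDomainGetVcpuPinInfo(dom, ncpus, cpumaps, maplen,
                                VIR_DOMAIN_AFFECT_CONFIG) < 0)
        goto err;

    /* print one 'y'/'-' per host pcpu for each vcpu, like virsh does */
    for (v = 0; v < ncpus; v++) {
        printf("VCPU %d: ", v);
        for (c = 0; c < hostcpus; c++)
            putchar(VIR_CPU_USABLE(cpumaps, maplen, v, c) ? 'y' : '-');
        putchar('\n');
    }

    free(cpumaps);
    virDomainFree(dom);
    virConnectClose(conn);
    return EXIT_SUCCESS;

err:
    free(cpumaps);
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return EXIT_FAILURE;
}

Building would be something like: cc vcpupin-info.c $(pkg-config --cflags --libs libvirt)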

[libvirt] [PATCH v2 2/3] libxl: implement virDomainPinVcpuFlags

And use it to implement libxlDomainPinVcpu(), similarly to what happens
in the QEMU driver. This way, it is possible to both query and change
the vcpu affinity of a persistent but not running domain.

In fact, before this patch, we have:

 # virsh list --all
  Id    Name                           State
 ----------------------------------------------------
  5     debian_32                      running
  -     fedora20_64                    shut off

 # virsh vcpupin fedora20_64 0 2-4 --current
 error: this function is not supported by the connection driver: virDomainPinVcpuFlags

After (same situation as above):

 # virsh vcpupin fedora20_64 0 2-4 --current

 # virsh vcpupin fedora20_64 0
 VCPU: CPU Affinity
 ----------------------------------
    0: 2-4

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Cc: Jim Fehlig <jfehlig@suse.com>
Cc: Ian Jackson <Ian.Jackson@eu.citrix.com>
---
 src/libxl/libxl_driver.c | 73 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 51 insertions(+), 22 deletions(-)

diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
index 3855509..e3da646 100644
--- a/src/libxl/libxl_driver.c
+++ b/src/libxl/libxl_driver.c
@@ -2358,45 +2358,62 @@ cleanup:
 }
 
 static int
-libxlDomainPinVcpu(virDomainPtr dom, unsigned int vcpu, unsigned char *cpumap,
-                   int maplen)
+libxlDomainPinVcpuFlags(virDomainPtr dom, unsigned int vcpu,
+                        unsigned char *cpumap, int maplen,
+                        unsigned int flags)
 {
     libxlDriverPrivatePtr driver = dom->conn->privateData;
     libxlDriverConfigPtr cfg = libxlDriverConfigGet(driver);
-    libxlDomainObjPrivatePtr priv;
+    virDomainDefPtr targetDef = NULL;
     virDomainObjPtr vm;
     int ret = -1;
-    libxl_bitmap map;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
 
     if (!(vm = libxlDomObjFromDomain(dom)))
         goto cleanup;
 
-    if (virDomainPinVcpuEnsureACL(dom->conn, vm->def) < 0)
+    if (virDomainPinVcpuFlagsEnsureACL(dom->conn, vm->def, flags) < 0)
         goto cleanup;
 
-    if (!virDomainObjIsActive(vm)) {
+    if ((flags & VIR_DOMAIN_AFFECT_LIVE) && !virDomainObjIsActive(vm)) {
         virReportError(VIR_ERR_OPERATION_INVALID, "%s",
-                       _("cannot pin vcpus on an inactive domain"));
+                       _("domain is inactive"));
         goto cleanup;
     }
 
-    priv = vm->privateData;
-
-    map.size = maplen;
-    map.map = cpumap;
-    if (libxl_set_vcpuaffinity(priv->ctx, dom->id, vcpu, &map) != 0) {
-        virReportError(VIR_ERR_INTERNAL_ERROR,
-                       _("Failed to pin vcpu '%d' with libxenlight"), vcpu);
+    if (virDomainLiveConfigHelperMethod(cfg->caps, driver->xmlopt, vm,
+                                        &flags, &targetDef) < 0)
         goto cleanup;
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        targetDef = vm->def;
+    }
+
+    /* Make sure coverity knows targetDef is valid at this point. */
+    sa_assert(targetDef);
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        libxl_bitmap map = { .size = maplen, .map = cpumap };
+        libxlDomainObjPrivatePtr priv;
+
+        priv = vm->privateData;
+        if (libxl_set_vcpuaffinity(priv->ctx, dom->id, vcpu, &map) != 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("Failed to pin vcpu '%d' with libxenlight"),
+                           vcpu);
+            goto cleanup;
+        }
     }
 
-    if (!vm->def->cputune.vcpupin) {
-        if (VIR_ALLOC(vm->def->cputune.vcpupin) < 0)
+    if (!targetDef->cputune.vcpupin) {
+        if (VIR_ALLOC(targetDef->cputune.vcpupin) < 0)
             goto cleanup;
-        vm->def->cputune.nvcpupin = 0;
+        targetDef->cputune.nvcpupin = 0;
     }
-    if (virDomainVcpuPinAdd(&vm->def->cputune.vcpupin,
-                            &vm->def->cputune.nvcpupin,
+    if (virDomainVcpuPinAdd(&targetDef->cputune.vcpupin,
+                            &targetDef->cputune.nvcpupin,
                             cpumap,
                             maplen,
                             vcpu) < 0) {
@@ -2405,11 +2422,14 @@ libxlDomainPinVcpu(virDomainPtr dom, unsigned int vcpu, unsigned char *cpumap,
         goto cleanup;
     }
 
-    if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
-        goto cleanup;
-
     ret = 0;
 
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        ret = virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm);
+    } else if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+        ret = virDomainSaveConfig(cfg->configDir, targetDef);
+    }
+
 cleanup:
     if (vm)
         virObjectUnlock(vm);
@@ -2418,6 +2438,14 @@ cleanup:
 }
 
 static int
+libxlDomainPinVcpu(virDomainPtr dom, unsigned int vcpu, unsigned char *cpumap,
+                   int maplen)
+{
+    return libxlDomainPinVcpuFlags(dom, vcpu, cpumap, maplen,
+                                   VIR_DOMAIN_AFFECT_LIVE);
+}
+
+static int
 libxlDomainGetVcpuPinInfo(virDomainPtr dom, int ncpumaps,
                           unsigned char *cpumaps, int maplen,
                           unsigned int flags)
@@ -4320,6 +4348,7 @@ static virDriver libxlDriver = {
     .domainSetVcpusFlags = libxlDomainSetVcpusFlags, /* 0.9.0 */
     .domainGetVcpusFlags = libxlDomainGetVcpusFlags, /* 0.9.0 */
     .domainPinVcpu = libxlDomainPinVcpu, /* 0.9.0 */
+    .domainPinVcpuFlags = libxlDomainPinVcpuFlags, /* 1.2.1 */
    .domainGetVcpus = libxlDomainGetVcpus, /* 0.9.0 */
     .domainGetVcpuPinInfo = libxlDomainGetVcpuPinInfo, /* 1.2.1 */
     .domainGetXMLDesc = libxlDomainGetXMLDesc, /* 0.9.0 */
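[Editor's illustration, not part of the patch] The "virsh vcpupin fedora20_64 0 2-4 --current" example above maps onto a client that builds a cpumap covering pcpus 2-4 and calls the public virDomainPinVcpuFlags() API with VIR_DOMAIN_AFFECT_CONFIG (which is what --current resolves to for a shut off domain). A minimal sketch follows; the URI and domain name are again assumptions.

#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("xen:///system");
    virDomainPtr dom = NULL;
    virNodeInfo nodeinfo;
    unsigned char *cpumap = NULL;
    int maplen, cpu, ret = EXIT_FAILURE;

    if (!conn)
        return EXIT_FAILURE;
    if (!(dom = virDomainLookupByName(conn, "fedora20_64")))
        goto out;
    if (virNodeGetInfo(conn, &nodeinfo) < 0)
        goto out;

    maplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo));
    if (!(cpumap = calloc(maplen, 1)))
        goto out;

    /* pin vcpu 0 to host pcpus 2-4 */
    for (cpu = 2; cpu <= 4; cpu++)
        VIR_USE_CPU(cpumap, cpu);

    /* persistent config only: valid even while the domain is shut off */
    if (virDomainPinVcpuFlags(dom, 0, cpumap, maplen,
                              VIR_DOMAIN_AFFECT_CONFIG) == 0)
        ret = EXIT_SUCCESS;

out:
    free(cpumap);
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}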

[libvirt] [PATCH v2 3/3] libxl: correctly handle affinity reset in virDomainPinVcpu[Flags]

By actually removing the <vcpupin> element (from within the <cputune>
section) from the XML, rather than just updating it with a fully set
vcpu affinity mask.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Cc: Jim Fehlig <jfehlig@suse.com>
Cc: Ian Jackson <Ian.Jackson@eu.citrix.com>
---
 src/libxl/libxl_driver.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
index 8528106..4705870 100644
--- a/src/libxl/libxl_driver.c
+++ b/src/libxl/libxl_driver.c
@@ -2365,6 +2365,7 @@ libxlDomainPinVcpuFlags(virDomainPtr dom, unsigned int vcpu,
     libxlDriverPrivatePtr driver = dom->conn->privateData;
     libxlDriverConfigPtr cfg = libxlDriverConfigGet(driver);
     virDomainDefPtr targetDef = NULL;
+    virBitmapPtr pcpumap = NULL;
     virDomainObjPtr vm;
     int ret = -1;
 
@@ -2393,6 +2394,10 @@ libxlDomainPinVcpuFlags(virDomainPtr dom, unsigned int vcpu,
 
     sa_assert(targetDef);
 
+    pcpumap = virBitmapNewData(cpumap, maplen);
+    if (!pcpumap)
+        goto cleanup;
+
     if (flags & VIR_DOMAIN_AFFECT_LIVE) {
         libxl_bitmap map = { .size = maplen, .map = cpumap };
         libxlDomainObjPrivatePtr priv;
@@ -2406,6 +2411,17 @@ libxlDomainPinVcpuFlags(virDomainPtr dom, unsigned int vcpu,
         }
     }
 
+    /* full bitmap means reset the settings (if any). */
+    if (virBitmapIsAllSet(pcpumap)) {
+        if (virDomainVcpuPinDel(targetDef, vcpu) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("Failed to delete vcpupin xml for vcpu '%d'"),
+                           vcpu);
+            goto cleanup;
+        }
+        goto out;
+    }
+
     if (!targetDef->cputune.vcpupin) {
         if (VIR_ALLOC(targetDef->cputune.vcpupin) < 0)
             goto cleanup;
@@ -2421,6 +2437,7 @@ libxlDomainPinVcpuFlags(virDomainPtr dom, unsigned int vcpu,
         goto cleanup;
     }
 
+out:
     ret = 0;
 
     if (flags & VIR_DOMAIN_AFFECT_LIVE) {
@@ -2432,6 +2449,7 @@ libxlDomainPinVcpuFlags(virDomainPtr dom, unsigned int vcpu,
 cleanup:
     if (vm)
         virObjectUnlock(vm);
+    virBitmapFree(pcpumap);
     virObjectUnref(cfg);
     return ret;
 }
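[Editor's illustration, not part of the patch] The reset path is triggered from a client by handing the driver a cpumap with every bit set, which the new virBitmapIsAllSet() branch recognises as a request to drop the <vcpupin> element. A small sketch follows; the helper name reset_vcpu_affinity is made up for the example.

#include <stdlib.h>
#include <string.h>
#include <libvirt/libvirt.h>

/* Reset the affinity of a given vcpu back to "all pcpus" in the
 * persistent config; with this patch the libxl driver removes the
 * corresponding <vcpupin> element instead of storing an all-set mask. */
static int
reset_vcpu_affinity(virConnectPtr conn, virDomainPtr dom, unsigned int vcpu)
{
    virNodeInfo nodeinfo;
    unsigned char *cpumap;
    int maplen, ret;

    if (virNodeGetInfo(conn, &nodeinfo) < 0)
        return -1;

    maplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo));
    if (!(cpumap = malloc(maplen)))
        return -1;

    /* a fully set map means "no pinning at all" */
    memset(cpumap, 0xff, maplen);

    ret = virDomainPinVcpuFlags(dom, vcpu, cpumap, maplen,
                                VIR_DOMAIN_AFFECT_CONFIG);
    free(cpumap);
    return ret;
}

The full-bitmap convention follows the commit message above: pinning a vcpu to every host pcpu is the same as having no pinning, so the domain XML stays free of redundant all-'y' masks.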

Dario Faggioli wrote:
Hi,
Take two, with the comments from Jim on v1 addressed. Basically, that means I've added a few comments and removed some unnecessary safety checks.
Series available here:
git://xenbits.xen.org/people/dariof/libvirt.git libxl/VcpuPinX-v2
Regards, Dario
---
Dario Faggioli (3):
      libxl: implement virDomainGetVcpuPinInfo
      libxl: implement virDomainPinVcpuFlags
      libxl: correctly handle affinity reset in virDomainPinVcpu[Flags]

 src/libxl/libxl_driver.c | 166 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 145 insertions(+), 21 deletions(-)
Thanks for addressing my previous comments. Patch 3 needed a trivial rebase after you added the Coverity comments in patch 2. ACK series and now pushed. Thanks Dario!

Regards,
Jim