Dario Faggioli wrote:
So that it is possible to query vCPU-related information of
a persistent but not running domain, as is already possible with
the QEMU driver.
In fact, before this patch, we have:
# virsh list --all
 Id    Name                           State
----------------------------------------------------
 5     debian_32                      running
 -     fedora20_64                    shut off
# virsh vcpuinfo fedora20_64
error: this function is not supported by the connection driver: virDomainGetVcpuPinInfo
After (same situation as above, i.e., fedora20_64 not running):
# virsh vcpuinfo fedora20_64
VCPU:           0
CPU:            N/A
State:          N/A
CPU time        N/A
CPU Affinity:   yyyyyyyy

VCPU:           1
CPU:            N/A
State:          N/A
CPU time        N/A
CPU Affinity:   yyyyyyyy
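FWIW, the same query through the C API (which is what the new driver entry point ends up serving) would look roughly like the sketch below. Error handling is omitted; the xen:///system URI and the 8-pCPU host are assumptions matching the example above.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("xen:///system");   /* assumed URI */
    virDomainPtr dom = virDomainLookupByName(conn, "fedora20_64");

    /* Number of vcpus in the persistent config of the shut off domain */
    int nvcpus = virDomainGetVcpusFlags(dom, VIR_DOMAIN_VCPU_CONFIG);
    int maplen = VIR_CPU_MAPLEN(8);                  /* 8 host pcpus assumed */
    unsigned char *cpumaps = calloc(nvcpus, maplen);

    /* Ask for the config (not live) pinning, which is what can be
     * answered while the domain is not running */
    int n = virDomainGetVcpuPinInfo(dom, nvcpus, cpumaps, maplen,
                                    VIR_DOMAIN_AFFECT_CONFIG);

    for (int v = 0; v < n; v++)
        printf("vcpu %d can run on pcpu 0: %s\n", v,
               VIR_CPU_USED(VIR_GET_CPUMAP(cpumaps, maplen, v), 0) ? "yes" : "no");

    free(cpumaps);
    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}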
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Cc: Jim Fehlig <jfehlig@suse.com>
Cc: Ian Jackson <Ian.Jackson@eu.citrix.com>
---
src/libxl/libxl_driver.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 83 insertions(+)
diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
index 692c3b7..750d4ec 100644
--- a/src/libxl/libxl_driver.c
+++ b/src/libxl/libxl_driver.c
@@ -2415,6 +2415,88 @@ cleanup:
     return ret;
 }
+static int
+libxlDomainGetVcpuPinInfo(virDomainPtr dom, int ncpumaps,
+                          unsigned char *cpumaps, int maplen,
+                          unsigned int flags)
+{
+    libxlDriverPrivatePtr driver = dom->conn->privateData;
+    libxlDriverConfigPtr cfg = libxlDriverConfigGet(driver);
+    virDomainObjPtr vm = NULL;
+    virDomainDefPtr targetDef = NULL;
+    virDomainVcpuPinDefPtr *vcpupin_list;
+    virBitmapPtr cpumask = NULL;
+    int maxcpu, hostcpus, vcpu, pcpu, n, ret = -1;
+    unsigned char *cpumap;
+    bool pinned;
+
+    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+                  VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+    if (!(vm = libxlDomObjFromDomain(dom)))
+        goto cleanup;
+
+    if (virDomainGetVcpuPinInfoEnsureACL(dom->conn, vm->def) < 0)
+        goto cleanup;
+
+    if (virDomainLiveConfigHelperMethod(cfg->caps, driver->xmlopt, vm,
+                                        &flags, &targetDef) < 0)
+        goto cleanup;
+
+    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+        targetDef = vm->def;
+    }
+
+    sa_assert(targetDef);
I think we should add a comment stating this is needed to silence
Coverity, similar to the qemu driver.
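Something along these lines would do; the exact wording is up to you, this is just a sketch of the kind of comment the qemu driver carries in the same situation:

+    /* Coverity cannot tell that the virDomainLiveConfigHelperMethod()
+     * call above guarantees targetDef is set on success, so help the
+     * static analyzer out. */
+    sa_assert(targetDef);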
+
+    /* Clamp to actual number of vcpus */
+    if (ncpumaps > targetDef->vcpus)
+        ncpumaps = targetDef->vcpus;
+
+    if (!cpumaps || ncpumaps < 1) {
+        virReportError(VIR_ERR_INVALID_ARG, "%s",
+                       _("cannot return affinity via a NULL pointer"));
+        goto cleanup;
+    }
cpumaps is guaranteed to be non-NULL on entry and ncpumaps is guaranteed
to be > 0 on entry, so the above check can be removed.
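I.e., with the check gone only the clamp remains, something like this sketch (the comment wording is just a suggestion):

+    /* Clamp to the actual number of vcpus; the public API entry point
+     * already guarantees cpumaps != NULL and ncpumaps > 0 */
+    if (ncpumaps > targetDef->vcpus)
+        ncpumaps = targetDef->vcpus;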
+
+    /* we use cfg->ctx, as vm->privateData->ctx may be NULL if VM is down */
+    if ((hostcpus = libxl_get_max_cpus(cfg->ctx)) < 0)
+        goto cleanup;
+
+    maxcpu = maplen * 8;
+    if (maxcpu > hostcpus)
+        maxcpu = hostcpus;
+
+    /* initialize cpumaps */
+    memset(cpumaps, 0xff, maplen * ncpumaps);
+    if (maxcpu % 8) {
+        for (vcpu = 0; vcpu < ncpumaps; vcpu++) {
+            cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
+            cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
+        }
+    }
+
+    /* if vcpupin setting exists, there may be unused pcpus */
+    for (n = 0; n < targetDef->cputune.nvcpupin; n++) {
+        vcpupin_list = targetDef->cputune.vcpupin;
+        vcpu = vcpupin_list[n]->vcpuid;
+        cpumask = vcpupin_list[n]->cpumask;
+        cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
+        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
+            if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+                goto cleanup;
+            if (!pinned)
+                VIR_UNUSE_CPU(cpumap, pcpu);
+        }
+    }
+    ret = ncpumaps;
+
+cleanup:
+    if (vm)
+        virObjectUnlock(vm);
+    virObjectUnref(cfg);
+    return ret;
+}
static int
libxlDomainGetVcpus(virDomainPtr dom, virVcpuInfoPtr info, int maxinfo,
@@ -4237,6 +4319,7 @@ static virDriver libxlDriver = {
     .domainGetVcpusFlags = libxlDomainGetVcpusFlags, /* 0.9.0 */
     .domainPinVcpu = libxlDomainPinVcpu, /* 0.9.0 */
     .domainGetVcpus = libxlDomainGetVcpus, /* 0.9.0 */
+    .domainGetVcpuPinInfo = libxlDomainGetVcpuPinInfo, /* 1.2.1 */
     .domainGetXMLDesc = libxlDomainGetXMLDesc, /* 0.9.0 */
     .connectDomainXMLFromNative = libxlConnectDomainXMLFromNative, /* 0.9.0 */
     .connectDomainXMLToNative = libxlConnectDomainXMLToNative, /* 0.9.0 */
Looks good otherwise. Thanks!
Regards,
Jim