Make necessary checks for active domain and IOThread capability before
calling the monitor to fetch IOThread data for a domain. If there are
threads, then also return the processor affinity maps - this is similar
to existing GetVcpuPin, GetVcpuInfo, and GetEmulatorPin fetches of the
processor affinity. Having a separate GetIOThreadsPin seemed to be a waste
when it could be done in one API.
Signed-off-by: John Ferlan <jferlan@redhat.com>
---
src/qemu/qemu_driver.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 114 insertions(+)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 709f468..ea61015 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5541,6 +5541,119 @@ qemuDomainGetMaxVcpus(virDomainPtr dom)
VIR_DOMAIN_VCPU_MAXIMUM));
}
+static int
+qemuDomainGetIOThreadsInfo(virDomainPtr dom,
+ virDomainIOThreadsInfoPtr **info,
+ unsigned int flags)
+{
+ virQEMUDriverPtr driver = dom->conn->privateData;
+ virDomainObjPtr vm;
+ qemuDomainObjPrivatePtr priv;
+ qemuMonitorIOThreadsInfoPtr *iothreads = NULL;
+ virDomainIOThreadsInfoPtr *info_ret = NULL;
+ int niothreads = 0;
+ int maxcpu, hostcpus, maplen;
+ int ret = -1;
+ size_t i;
+
+ virCheckFlags(0, -1);
+
+ if (!(vm = qemuDomObjFromDomain(dom)))
+ goto cleanup;
+
+ if (virDomainGetIOThreadsInfoEnsureACL(dom->conn, vm->def) < 0)
+ goto cleanup;
+
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ goto cleanup;
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("cannot list IOThreads for an inactive domain"));
+ goto endjob;
+ }
+
+ priv = vm->privateData;
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("IOThreads not supported with this binary"));
+ goto endjob;
+ }
+
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
+ goto endjob;
+ niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
+ if (qemuDomainObjExitMonitor(driver, vm) < 0)
+ goto endjob;
+ if (niothreads < 0)
+ goto endjob;
+
+ /* Nothing to do */
+ if (niothreads == 0) {
+ ret = 0;
+ goto endjob;
+ }
+
+ if ((hostcpus = nodeGetCPUCount()) < 0)
+ goto endjob;
+
+ maplen = VIR_CPU_MAPLEN(hostcpus);
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+
+ if (VIR_ALLOC_N(info_ret, niothreads) < 0)
+ goto endjob;
+
+ for (i = 0; i < niothreads; i++) {
+ virBitmapPtr map = NULL;
+ unsigned char *tmpmap = NULL;
+ int tmpmaplen = 0;
+
+ if (VIR_ALLOC(info_ret[i]) < 0)
+ goto endjob;
+
+ if (virStrToLong_ui(iothreads[i]->name + strlen("iothread"), NULL,
10,
+ &info_ret[i]->iothread_id) < 0)
+ goto endjob;
+ info_ret[i]->thread_id = iothreads[i]->thread_id;
+
+ if (VIR_ALLOC_N(info_ret[i]->cpumap, maplen) < 0)
+ goto endjob;
+
+ if (virProcessGetAffinity(iothreads[i]->thread_id, &map, maxcpu) < 0)
+ goto endjob;
+
+ virBitmapToData(map, &tmpmap, &tmpmaplen);
+ if (tmpmaplen > maplen)
+ tmpmaplen = maplen;
+ memcpy(info_ret[i]->cpumap, tmpmap, tmpmaplen);
+ info_ret[i]->cpumaplen = tmpmaplen;
+
+ VIR_FREE(tmpmap);
+ virBitmapFree(map);
+ }
+
+ *info = info_ret;
+ info_ret = NULL;
+ ret = niothreads;
+
+ endjob:
+ qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+ qemuDomObjEndAPI(&vm);
+
+ if (info_ret) {
+ for (i = 0; i < niothreads; i++)
+ virDomainIOThreadsInfoFree(info_ret[i]);
+ VIR_FREE(info_ret);
+ }
+ return ret;
+
+}
+
+
static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
{
virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19141,6 +19254,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
.domainGetEmulatorPinInfo = qemuDomainGetEmulatorPinInfo, /* 0.10.0 */
.domainGetVcpus = qemuDomainGetVcpus, /* 0.4.4 */
.domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
+ .domainGetIOThreadsInfo = qemuDomainGetIOThreadsInfo, /* 1.2.13 */
.domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
.domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
.nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */
--
2.1.0