Add qemuDomainGetIOThreadPin to handle fetching the CPU affinity map
for one specific IOThread.

Add qemuDomainPinIOThread to handle setting the cpuset (CPU affinity)
for a specific IOThread.
Signed-off-by: John Ferlan <jferlan@redhat.com>
---
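Notes (not for the commit message):
    As a caller's-eye sanity check, here is a rough, untested sketch of
    driving the new entry points through the public API. It assumes the
    public wrappers added earlier in this series mirror the driver
    callbacks below as virDomainPinIOThread()/virDomainGetIOThreadPin();
    the domain name "guest" and the CPU numbers are placeholders.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = NULL;
        virDomainPtr dom = NULL;
        virNodeInfo nodeinfo;
        unsigned char *cpumap = NULL;
        int maplen;
        int ret = EXIT_FAILURE;

        if (!(conn = virConnectOpen("qemu:///system")))
            goto cleanup;
        if (!(dom = virDomainLookupByName(conn, "guest")))
            goto cleanup;
        if (virNodeGetInfo(conn, &nodeinfo) < 0)
            goto cleanup;

        /* one bit per host CPU, rounded up to whole bytes */
        maplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo));
        if (!(cpumap = calloc(maplen, 1)))
            goto cleanup;

        /* pin IOThread 1 to host CPUs 0-1 on the live domain */
        VIR_USE_CPU(cpumap, 0);
        VIR_USE_CPU(cpumap, 1);
        if (virDomainPinIOThread(dom, 1, cpumap, maplen,
                                 VIR_DOMAIN_AFFECT_LIVE) < 0)
            goto cleanup;

        /* read the affinity back; 0 means "no pinning information" */
        memset(cpumap, 0, maplen);
        if (virDomainGetIOThreadPin(dom, 1, cpumap, maplen,
                                    VIR_DOMAIN_AFFECT_LIVE) < 0)
            goto cleanup;

        printf("IOThread 1: CPU0 %s, CPU1 %s\n",
               VIR_CPU_USED(cpumap, 0) ? "yes" : "no",
               VIR_CPU_USED(cpumap, 1) ? "yes" : "no");
        ret = EXIT_SUCCESS;

     cleanup:
        free(cpumap);
        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return ret;
    }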
include/libvirt/libvirt-domain.h | 9 ++
src/qemu/qemu_driver.c | 302 +++++++++++++++++++++++++++++++++++++++
2 files changed, 311 insertions(+)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index fc35cd2..e81389a 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -3223,6 +3223,15 @@ typedef void
(*virConnectDomainEventDeviceRemovedCallback)(virConnectPtr conn,
# define VIR_DOMAIN_TUNABLE_CPU_EMULATORPIN "cputune.emulatorpin"
/**
+ * VIR_DOMAIN_TUNABLE_CPU_IOTHREADSPIN:
+ *
+ * Macro represents formatted pinning for one IOThread, specified by an id
+ * that is appended to the parameter name, for example "cputune.iothreadpin1",
+ * as VIR_TYPED_PARAM_STRING.
+ */
+# define VIR_DOMAIN_TUNABLE_CPU_IOTHREADSPIN "cputune.iothreadpin%u"
+
+/**
* VIR_DOMAIN_TUNABLE_CPU_CPU_SHARES:
*
* Macro represents proportional weight of the scheduler used on the
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d8a761d..5c8040e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5780,6 +5780,306 @@ qemuDomainGetIOThreadsInfo(virDomainPtr dom,
return ret;
}
+static int
+qemuDomainGetIOThreadPin(virDomainPtr dom,
+ unsigned int iothread_id,
+ unsigned char *cpumap,
+ int maplen,
+ unsigned int flags)
+{
+ virQEMUDriverPtr driver = dom->conn->privateData;
+ virDomainObjPtr vm = NULL;
+ virDomainDefPtr targetDef = NULL;
+ int ret = -1;
+ virCapsPtr caps = NULL;
+ int maxcpu, hostcpus, pcpu;
+ virBitmapPtr cpumask = NULL;
+ bool pinned;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ if (!(vm = qemuDomObjFromDomain(dom)))
+ goto cleanup;
+
+ if (virDomainGetIOThreadPinEnsureACL(dom->conn, vm->def) < 0)
+ goto cleanup;
+
+ if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+ goto cleanup;
+
+ if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt,
+ vm, &flags, &targetDef) < 0)
+ goto cleanup;
+
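+ /* For a live query report against the active definition; for
+ * --config, virDomainLiveConfigHelperMethod has already pointed
+ * targetDef at the persistent definition. */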
+ if (flags & VIR_DOMAIN_AFFECT_LIVE)
+ targetDef = vm->def;
+
+ /* Coverity didn't realize that targetDef must be set if we got here. */
+ sa_assert(targetDef);
+
+ if ((hostcpus = nodeGetCPUCount()) < 0)
+ goto cleanup;
+
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+
+ /* initialize cpumap to all CPUs used, clearing trailing bits beyond maxcpu */
+ memset(cpumap, 0xff, maplen);
+ if (maxcpu % 8)
+ cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
+
+ if (targetDef->cputune.iothreadspin) {
+ if (iothread_id > targetDef->iothreads) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("iothread value out of range %d > %d"),
+ iothread_id, targetDef->iothreads);
+ goto cleanup;
+ }
+
+ cpumask = targetDef->cputune.iothreadspin[iothread_id - 1]->cpumask;
+ } else if (targetDef->cpumask) {
+ cpumask = targetDef->cpumask;
+ } else {
+ ret = 0;
+ goto cleanup;
+ }
+
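+ /* clear every host CPU that is absent from the chosen cpumask */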
+ for (pcpu = 0; pcpu < maxcpu; pcpu++) {
+ if (virBitmapGetBit(cpumask, pcpu, &pinned) < 0)
+ goto cleanup;
+ if (!pinned)
+ VIR_UNUSE_CPU(cpumap, pcpu);
+ }
+ ret = 1;
+
+ cleanup:
+ qemuDomObjEndAPI(&vm);
+ virObjectUnref(caps);
+ return ret;
+}
+
+static int
+qemuDomainPinIOThread(virDomainPtr dom,
+ unsigned int iothread_id,
+ unsigned char *cpumap,
+ int maplen,
+ unsigned int flags)
+{
+ int ret = -1;
+ virQEMUDriverPtr driver = dom->conn->privateData;
+ virQEMUDriverConfigPtr cfg = NULL;
+ virDomainObjPtr vm;
+ virCapsPtr caps = NULL;
+ virDomainDefPtr persistentDef = NULL;
+ virBitmapPtr pcpumap = NULL;
+ bool doReset = false;
+ qemuDomainObjPrivatePtr priv;
+ virDomainVcpuPinDefPtr *newIOThreadsPin = NULL;
+ size_t newIOThreadsPinNum = 0;
+ virCgroupPtr cgroup_iothread = NULL;
+ virObjectEventPtr event = NULL;
+ char paramField[VIR_TYPED_PARAM_FIELD_LENGTH] = "";
+ char *str = NULL;
+ virTypedParameterPtr eventParams = NULL;
+ int eventNparams = 0;
+ int eventMaxparams = 0;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ cfg = virQEMUDriverGetConfig(driver);
+
+ if (!(vm = qemuDomObjFromDomain(dom)))
+ goto cleanup;
+ priv = vm->privateData;
+
+ if (virDomainPinIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
+ goto cleanup;
+
+ if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+ goto cleanup;
+
+ if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("Changing affinity for IOThread dynamically is "
+ "not allowed when CPU placement is 'auto'"));
+ goto cleanup;
+ }
+
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ goto cleanup;
+
+ if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
+ &persistentDef) < 0)
+ goto endjob;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE)
+ persistentDef = vm->def;
+
+ /* Coverity didn't realize that persistentDef must be set if we got here. */
+ sa_assert(persistentDef);
+
+ if (!(pcpumap = virBitmapNewData(cpumap, maplen)))
+ goto endjob;
+
+ if (virBitmapIsAllClear(pcpumap)) {
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
+ _("Empty iothread cpumap list for pinning"));
+ goto endjob;
+ }
+
+ /* pinning to all physical cpus means resetting,
+ * so check whether we can reset the setting.
+ */
+ if (virBitmapIsAllSet(pcpumap))
+ doReset = true;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+
+ if (priv->iothreadpids == NULL) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("IOThread affinity is not
supported"));
+ goto endjob;
+ }
+
+ if (iothread_id > priv->niothreadpids) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("iothread value out of range %d > %d"),
+ iothread_id, priv->niothreadpids);
+ goto endjob;
+ }
+
+ if (vm->def->cputune.iothreadspin) {
+ /* The VcpuPinDefCopy works for IOThreads too */
+ newIOThreadsPin =
+ virDomainVcpuPinDefCopy(vm->def->cputune.iothreadspin,
+ vm->def->cputune.niothreadspin);
+ if (!newIOThreadsPin)
+ goto endjob;
+
+ newIOThreadsPinNum = vm->def->cputune.niothreadspin;
+ } else {
+ if (VIR_ALLOC(newIOThreadsPin) < 0)
+ goto endjob;
+ newIOThreadsPinNum = 0;
+ }
+
+ if (virDomainIOThreadsPinAdd(&newIOThreadsPin, &newIOThreadsPinNum,
+ cpumap, maplen, iothread_id) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update iothreadspin"));
+ virDomainVcpuPinDefArrayFree(newIOThreadsPin, newIOThreadsPinNum);
+ goto endjob;
+ }
+
+ /* Configure the corresponding cpuset cgroup before set affinity. */
+ if (virCgroupHasController(priv->cgroup,
+ VIR_CGROUP_CONTROLLER_CPUSET)) {
+ if (virCgroupNewIOThread(priv->cgroup, iothread_id,
+ false, &cgroup_iothread) < 0)
+ goto endjob;
+ if (qemuSetupCgroupIOThreadsPin(cgroup_iothread,
+ newIOThreadsPin,
+ newIOThreadsPinNum,
+ iothread_id) < 0) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ _("failed to set cpuset.cpus in cgroup"
+ " for iothread %d"), iothread_id);
+ goto endjob;
+ }
+ } else {
+ if (virProcessSetAffinity(priv->iothreadpids[iothread_id - 1],
+ pcpumap) < 0) {
+ virReportError(VIR_ERR_SYSTEM_ERROR,
+ _("failed to set cpu affinity for IOThread
%d"),
+ iothread_id);
+ goto endjob;
+ }
+ }
+
+ if (doReset) {
+ virDomainIOThreadsPinDel(vm->def, iothread_id);
+ } else {
+ if (vm->def->cputune.iothreadspin)
+ virDomainVcpuPinDefArrayFree(vm->def->cputune.iothreadspin,
+ vm->def->cputune.niothreadspin);
+
+ vm->def->cputune.iothreadspin = newIOThreadsPin;
+ vm->def->cputune.niothreadspin = newIOThreadsPinNum;
+ newIOThreadsPin = NULL;
+ }
+
+ if (newIOThreadsPin)
+ virDomainVcpuPinDefArrayFree(newIOThreadsPin, newIOThreadsPinNum);
+
+ if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
+ goto endjob;
+
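+ /* Emit a VIR_DOMAIN_EVENT_ID_TUNABLE event so listeners learn of
+ * the new pinning, e.g. "cputune.iothreadpin1" */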
+ if (snprintf(paramField, VIR_TYPED_PARAM_FIELD_LENGTH,
+ VIR_DOMAIN_TUNABLE_CPU_IOTHREADSPIN, iothread_id) < 0) {
+ goto endjob;
+ }
+
+ str = virBitmapFormat(pcpumap);
+ if (virTypedParamsAddString(&eventParams, &eventNparams,
+ &eventMaxparams, paramField, str) < 0)
+ goto endjob;
+
+ event = virDomainEventTunableNewFromDom(dom, eventParams, eventNparams);
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ if (iothread_id > persistentDef->iothreads) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("iothread value out of range %d > %d"),
+ iothread_id, persistentDef->iothreads);
+ goto endjob;
+ }
+
+ if (doReset) {
+ virDomainIOThreadsPinDel(persistentDef, iothread_id);
+ } else {
+ if (!persistentDef->cputune.iothreadspin) {
+ if (VIR_ALLOC(persistentDef->cputune.iothreadspin) < 0)
+ goto endjob;
+ persistentDef->cputune.niothreadspin = 0;
+ }
+ if (virDomainIOThreadsPinAdd(&persistentDef->cputune.iothreadspin,
+ &persistentDef->cputune.niothreadspin,
+ cpumap,
+ maplen,
+ iothread_id) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update or add iothreadspin xml "
+ "of a persistent domain"));
+ goto endjob;
+ }
+ }
+
+ ret = virDomainSaveConfig(cfg->configDir, persistentDef);
+ goto endjob;
+ }
+
+ ret = 0;
+
+ endjob:
+ qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+ if (cgroup_iothread)
+ virCgroupFree(&cgroup_iothread);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ VIR_FREE(str);
+ virBitmapFree(pcpumap);
+ qemuDomObjEndAPI(&vm);
+ virObjectUnref(caps);
+ virObjectUnref(cfg);
+ return ret;
+}
+
static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
{
virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19332,6 +19632,8 @@ static virHypervisorDriver qemuHypervisorDriver = {
.domainGetVcpus = qemuDomainGetVcpus, /* 0.4.4 */
.domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
.domainGetIOThreadsInfo = qemuDomainGetIOThreadsInfo, /* 1.2.14 */
+ .domainGetIOThreadPin = qemuDomainGetIOThreadPin, /* 1.2.14 */
+ .domainPinIOThread = qemuDomainPinIOThread, /* 1.2.14 */
.domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
.domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
.nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */
--
2.1.0