From: Tang Chen <tangchen(a)cn.fujitsu.com>
Introduce two APIs to support pinning of hypervisor threads in the qemu
driver:

1) qemudDomainPinHypervisorFlags: set the pinning of hypervisor threads.
2) qemudDomainGetHypervisorPinInfo: get the pinning info of hypervisor
   threads.

They are analogous to qemudDomainPinVcpuFlags and qemudDomainGetVcpuPinInfo.
The corresponding dispatchers, remoteDispatchDomainPinHypervisorFlags and
remoteDispatchDomainGetHypervisorPinInfo, are also introduced.
Signed-off-by: Tang Chen <tangchen(a)cn.fujitsu.com>
Signed-off-by: Hu Tao <hutao(a)cn.fujitsu.com>
---
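Note for reviewers (below the ---, not part of the commit message): both
entry points consume libvirt's usual physical-CPU bitmap, one bit per host
CPU packed eight to a byte, which is why the code checks
cpumap[pcpu/8] & (1 << (pcpu % 8)). A minimal sketch of building such a map
with the existing VIR_CPU_MAPLEN/VIR_USE_CPU helpers from libvirt.h (the
function name here is purely illustrative):

    #include <string.h>
    #include <libvirt/libvirt.h>

    /* Mark physical CPUs 0 and 2 usable; all others stay cleared. */
    static void
    example_map_cpus_0_and_2(unsigned char *cpumap, int maplen)
    {
        memset(cpumap, 0, maplen); /* start with no CPUs usable */
        VIR_USE_CPU(cpumap, 0);    /* sets bit 0 of byte 0 */
        VIR_USE_CPU(cpumap, 2);    /* sets bit 2 of byte 0 */
    }
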
src/qemu/qemu_driver.c | 223 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 223 insertions(+)
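
Usage sketch, also not part of the patch to apply: assuming the public
wrappers virDomainPinHypervisorFlags() and virDomainGetHypervisorPinInfo(),
introduced elsewhere in this series, mirror the driver signatures below, a
client could exercise the new code paths roughly as follows. The connection
URI and the domain name "demo" are placeholders:

    #include <stdio.h>
    #include <string.h>
    #include <libvirt/libvirt.h>

    int
    main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
        int maplen = VIR_CPU_MAPLEN(8); /* assume an 8-CPU host */
        unsigned char map[VIR_CPU_MAPLEN(8)];
        int ret = 1;

        if (!dom)
            goto cleanup;

        memset(map, 0, maplen);
        VIR_USE_CPU(map, 0); /* allow physical CPU 0 */
        VIR_USE_CPU(map, 2); /* allow physical CPU 2 */

        /* Pin the hypervisor threads of the running domain. */
        if (virDomainPinHypervisorFlags(dom, map, maplen,
                                        VIR_DOMAIN_AFFECT_LIVE) < 0)
            goto cleanup;

        /* Read the pinning back: the driver fills one map and
         * returns 1 on success. */
        memset(map, 0, maplen);
        if (virDomainGetHypervisorPinInfo(dom, map, maplen,
                                          VIR_DOMAIN_AFFECT_LIVE) != 1)
            goto cleanup;

        printf("CPU 2 usable by hypervisor threads: %s\n",
               VIR_CPU_USED(map, 2) ? "yes" : "no");
        ret = 0;

    cleanup:
        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return ret;
    }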
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 2da13a4..3b1bf2c 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3851,6 +3851,227 @@ cleanup:
}
static int
+qemudDomainPinHypervisorFlags(virDomainPtr dom,
+ unsigned char *cpumap,
+ int maplen,
+ unsigned int flags)
+{
+ struct qemud_driver *driver = dom->conn->privateData;
+ virDomainObjPtr vm;
+ virCgroupPtr cgroup_dom = NULL;
+ virCgroupPtr cgroup_hypervisor = NULL;
+ pid_t pid;
+ virDomainDefPtr persistentDef = NULL;
+ int maxcpu, hostcpus;
+ virNodeInfo nodeinfo;
+ int ret = -1;
+ qemuDomainObjPrivatePtr priv;
+ bool canResetting = true;
+ int pcpu;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ qemuDriverUnlock(driver);
+
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(dom->uuid, uuidstr);
+ virReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"),
uuidstr);
+ goto cleanup;
+ }
+
+ if (virDomainLiveConfigHelperMethod(driver->caps, vm, &flags,
+ &persistentDef) < 0)
+ goto cleanup;
+
+ priv = vm->privateData;
+
+ if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
+ goto cleanup;
+ hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+ /* Pinning to all physical cpus means resetting,
+ * so check whether we can reset the setting instead.
+ */
+ for (pcpu = 0; pcpu < hostcpus; pcpu++) {
+ if ((cpumap[pcpu/8] & (1 << (pcpu % 8))) == 0) {
+ canResetting = false;
+ break;
+ }
+ }
+
+ pid = vm->pid;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+
+ if (priv->vcpupids != NULL) {
+ if (virDomainHypervisorPinAdd(vm->def, cpumap, maplen) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update or add hypervisorpin xml "
+ "of a running domain"));
+ goto cleanup;
+ }
+
+ if (qemuCgroupControllerActive(driver,
+ VIR_CGROUP_CONTROLLER_CPUSET)) {
+ /*
+ * Configure the corresponding cpuset cgroup.
+ * If no cgroup for domain or hypervisor exists, do nothing.
+ */
+ if (virCgroupForDomain(driver->cgroup, vm->def->name,
+ &cgroup_dom, 0) == 0) {
+ if (virCgroupForHypervisor(cgroup_dom, &cgroup_hypervisor, 0) == 0) {
+ if (qemuSetupCgroupHypervisorPin(cgroup_hypervisor, vm->def) < 0) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("failed to set cpuset.cpus in
cgroup"
+ " for hypervisor threads"));
+ goto cleanup;
+ }
+ }
+ }
+ }
+
+ if (canResetting) {
+ if (virDomainHypervisorPinDel(vm->def) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to delete hypervisorpin xml of "
+ "a running domain"));
+ goto cleanup;
+ }
+ }
+
+ if (virProcessInfoSetAffinity(pid, cpumap, maplen, maxcpu) < 0) {
+ virReportError(VIR_ERR_SYSTEM_ERROR, "%s",
+ _("failed to set cpu affinity for "
+ "hypervisor threads"));
+ goto cleanup;
+ }
+ } else {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("cpu affinity is not
supported"));
+ goto cleanup;
+ }
+
+ if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
+ goto cleanup;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+
+ if (canResetting) {
+ if (virDomainHypervisorPinDel(persistentDef) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to delete hypervisorpin xml of "
+ "a persistent domain"));
+ goto cleanup;
+ }
+ } else {
+ if (virDomainHypervisorPinAdd(persistentDef, cpumap, maplen) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update or add hypervisorpin xml "
+ "of a persistent domain"));
+ goto cleanup;
+ }
+ }
+
+ ret = virDomainSaveConfig(driver->configDir, persistentDef);
+ goto cleanup;
+ }
+
+ ret = 0;
+
+cleanup:
+ if (cgroup_hypervisor)
+ virCgroupFree(&cgroup_hypervisor);
+ if (cgroup_dom)
+ virCgroupFree(&cgroup_dom);
+
+ if (vm)
+ virDomainObjUnlock(vm);
+ return ret;
+}
+
+static int
+qemudDomainGetHypervisorPinInfo(virDomainPtr dom,
+ unsigned char *cpumaps,
+ int maplen,
+ unsigned int flags)
+{
+ struct qemud_driver *driver = dom->conn->privateData;
+ virDomainObjPtr vm = NULL;
+ virNodeInfo nodeinfo;
+ virDomainDefPtr targetDef = NULL;
+ int ret = -1;
+ int maxcpu, hostcpus, pcpu;
+ virDomainVcpuPinDefPtr hypervisorpin = NULL;
+ char *cpumask = NULL;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ qemuDriverUnlock(driver);
+
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(dom->uuid, uuidstr);
+ virReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"),
uuidstr);
+ goto cleanup;
+ }
+
+ if (virDomainLiveConfigHelperMethod(driver->caps, vm, &flags,
+ &targetDef) < 0)
+ goto cleanup;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE)
+ targetDef = vm->def;
+
+ /* Coverity didn't realize that targetDef must be set if we got here. */
+ sa_assert(targetDef);
+
+ if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
+ goto cleanup;
+ hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+
+ /* initialize cpumaps */
+ memset(cpumaps, 0xff, maplen);
+ if (maxcpu % 8) {
+ cpumaps[maplen - 1] &= (1 << maxcpu % 8) - 1;
+ }
+
+ /* If no hypervisorpin, all cpus should be used */
+ hypervisorpin = targetDef->cputune.hypervisorpin;
+ if (!hypervisorpin) {
+ ret = 0;
+ goto cleanup;
+ }
+
+ cpumask = hypervisorpin->cpumask;
+ for (pcpu = 0; pcpu < maxcpu; pcpu++) {
+ if (cpumask[pcpu] == 0)
+ VIR_UNUSE_CPU(cpumaps, pcpu);
+ }
+
+ ret = 1;
+
+cleanup:
+ if (vm)
+ virDomainObjUnlock(vm);
+ return ret;
+}
+
+static int
qemudDomainGetVcpus(virDomainPtr dom,
virVcpuInfoPtr info,
int maxinfo,
@@ -13255,6 +13476,8 @@ static virDriver qemuDriver = {
.domainPinVcpu = qemudDomainPinVcpu, /* 0.4.4 */
.domainPinVcpuFlags = qemudDomainPinVcpuFlags, /* 0.9.3 */
.domainGetVcpuPinInfo = qemudDomainGetVcpuPinInfo, /* 0.9.3 */
+ .domainPinHypervisorFlags = qemudDomainPinHypervisorFlags, /* 0.9.13 */
+ .domainGetHypervisorPinInfo = qemudDomainGetHypervisorPinInfo, /* 0.9.13 */
.domainGetVcpus = qemudDomainGetVcpus, /* 0.4.4 */
.domainGetMaxVcpus = qemudDomainGetMaxVcpus, /* 0.4.4 */
.domainGetSecurityLabel = qemudDomainGetSecurityLabel, /* 0.6.1 */
--
1.7.10.2