From: Tang Chen <tangchen@cn.fujitsu.com>
Introduce two APIs to support pinning of hypervisor threads in the qemu driver.
1) qemudDomainPinHypervisorFlags: set the pinning of the hypervisor threads.
2) qemudDomainGetHypervisorPinInfo: get the pinning of all hypervisor threads.
They are similar to qemudDomainPinVcpuFlags and qemudDomainGetVcpuPinInfo.
The corresponding dispatchers, remoteDispatchDomainPinHypervisorFlags and
remoteDispatchDomainGetHypervisorPinInfo, are introduced as well.
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Hu Tao <hutao@cn.fujitsu.com>
---
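Note (not part of the commit): below is a minimal client-side sketch of how an
application might drive the new entry points, assuming the public
virDomainPinHypervisorFlags()/virDomainGetHypervisorPinInfo() wrappers added
earlier in this series plus the cpumap helpers from libvirt.h. The helper name
and the choice of CPU 0 are illustrative only.

    /* Hypothetical usage sketch: pin the hypervisor threads of a running
     * domain to host CPU 0, then read the pinning back.  A single cpumap
     * covers all hypervisor threads, so only maplen bytes are exchanged. */
    #include <stdio.h>
    #include <string.h>
    #include <libvirt/libvirt.h>

    static int
    pinHypervisorToCpu0(virDomainPtr dom, int hostcpus)
    {
        int maplen = VIR_CPU_MAPLEN(hostcpus);
        unsigned char map[maplen];               /* one bit per host CPU */

        memset(map, 0, maplen);
        VIR_USE_CPU(map, 0);                     /* allow only physical CPU 0 */

        if (virDomainPinHypervisorFlags(dom, map, maplen,
                                        VIR_DOMAIN_AFFECT_LIVE) < 0)
            return -1;

        memset(map, 0, maplen);
        if (virDomainGetHypervisorPinInfo(dom, map, maplen,
                                          VIR_DOMAIN_AFFECT_LIVE) < 0)
            return -1;

        printf("hypervisor threads %s pinned to CPU 0\n",
               VIR_CPU_USED(map, 0) ? "are" : "are not");
        return 0;
    }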
daemon/remote.c | 103 ++++++++++++++++++++++
src/qemu/qemu_driver.c | 223 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 326 insertions(+)
diff --git a/daemon/remote.c b/daemon/remote.c
index 80626a2..0e46d18 100644
--- a/daemon/remote.c
+++ b/daemon/remote.c
@@ -1533,6 +1533,109 @@ no_memory:
}
static int
+remoteDispatchDomainPinHypervisorFlags(virNetServerPtr server ATTRIBUTE_UNUSED,
+ virNetServerClientPtr client,
+ virNetMessagePtr msg ATTRIBUTE_UNUSED,
+ virNetMessageErrorPtr rerr,
+ remote_domain_pin_hypervisor_flags_args *args)
+{
+ int rv = -1;
+ virDomainPtr dom = NULL;
+ struct daemonClientPrivate *priv =
+ virNetServerClientGetPrivateData(client);
+
+ if (!priv->conn) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open"));
+ goto cleanup;
+ }
+
+ if (!(dom = get_nonnull_domain(priv->conn, args->dom)))
+ goto cleanup;
+
+ if (virDomainPinHypervisorFlags(dom,
+ (unsigned char *) args->cpumap.cpumap_val,
+ args->cpumap.cpumap_len,
+ args->flags) < 0)
+ goto cleanup;
+
+ rv = 0;
+
+cleanup:
+ if (rv < 0)
+ virNetMessageSaveError(rerr);
+ if (dom)
+ virDomainFree(dom);
+ return rv;
+}
+
+
+static int
+remoteDispatchDomainGetHypervisorPinInfo(virNetServerPtr server ATTRIBUTE_UNUSED,
+ virNetServerClientPtr client ATTRIBUTE_UNUSED,
+ virNetMessagePtr msg ATTRIBUTE_UNUSED,
+ virNetMessageErrorPtr rerr,
+ remote_domain_get_hypervisor_pin_info_args *args,
+ remote_domain_get_hypervisor_pin_info_ret *ret)
+{
+ virDomainPtr dom = NULL;
+ unsigned char *cpumaps = NULL;
+ int num;
+ int rv = -1;
+ struct daemonClientPrivate *priv =
+ virNetServerClientGetPrivateData(client);
+
+ if (!priv->conn) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open"));
+ goto cleanup;
+ }
+
+ if (!(dom = get_nonnull_domain(priv->conn, args->dom)))
+ goto cleanup;
+
+ /* There is only one cpumap struct for all hypervisor threads */
+ if (args->ncpumaps != 1) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("ncpumaps != 1"));
+ goto cleanup;
+ }
+
+ if (INT_MULTIPLY_OVERFLOW(args->ncpumaps, args->maplen) ||
+ args->ncpumaps * args->maplen > REMOTE_CPUMAPS_MAX) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("maxinfo * maplen > REMOTE_CPUMAPS_MAX"));
+ goto cleanup;
+ }
+
+ /* Allocate buffers to take the results */
+ if (args->maplen > 0 &&
+ VIR_ALLOC_N(cpumaps, args->maplen) < 0)
+ goto no_memory;
+
+ if ((num = virDomainGetHypervisorPinInfo(dom,
+ cpumaps,
+ args->maplen,
+ args->flags)) < 0)
+ goto cleanup;
+
+ ret->num = num;
+ ret->cpumaps.cpumaps_len = args->maplen;
+ ret->cpumaps.cpumaps_val = (char *) cpumaps;
+ cpumaps = NULL;
+
+ rv = 0;
+
+cleanup:
+ if (rv < 0)
+ virNetMessageSaveError(rerr);
+ VIR_FREE(cpumaps);
+ if (dom)
+ virDomainFree(dom);
+ return rv;
+
+no_memory:
+ virReportOOMError();
+ goto cleanup;
+}
+
+static int
remoteDispatchDomainGetVcpus(virNetServerPtr server ATTRIBUTE_UNUSED,
virNetServerClientPtr client ATTRIBUTE_UNUSED,
virNetMessagePtr msg ATTRIBUTE_UNUSED,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 7641fa6..5cc8e94 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3851,6 +3851,227 @@ cleanup:
}
static int
+qemudDomainPinHypervisorFlags(virDomainPtr dom,
+ unsigned char *cpumap,
+ int maplen,
+ unsigned int flags)
+{
+ struct qemud_driver *driver = dom->conn->privateData;
+ virDomainObjPtr vm;
+ virCgroupPtr cgroup_dom = NULL;
+ virCgroupPtr cgroup_hypervisor = NULL;
+ pid_t pid;
+ virDomainDefPtr persistentDef = NULL;
+ int maxcpu, hostcpus;
+ virNodeInfo nodeinfo;
+ int ret = -1;
+ qemuDomainObjPrivatePtr priv;
+ bool canResetting = true;
+ int pcpu;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ qemuDriverUnlock(driver);
+
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(dom->uuid, uuidstr);
+ virReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"), uuidstr);
+ goto cleanup;
+ }
+
+ if (virDomainLiveConfigHelperMethod(driver->caps, vm, &flags,
+ &persistentDef) < 0)
+ goto cleanup;
+
+ priv = vm->privateData;
+
+ if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
+ goto cleanup;
+ hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+ /* pinning to all physical cpus means resetting,
+ * so check if we can reset the setting.
+ */
+ for (pcpu = 0; pcpu < hostcpus; pcpu++) {
+ if ((cpumap[pcpu/8] & (1 << (pcpu % 8))) == 0) {
+ canResetting = false;
+ break;
+ }
+ }
+
+ pid = vm->pid;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+
+ if (priv->vcpupids != NULL) {
+ if (virDomainHypervisorPinAdd(vm->def, cpumap, maplen) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update or add hypervisorpin xml "
+ "of a running domain"));
+ goto cleanup;
+ }
+
+ if (qemuCgroupControllerActive(driver,
+ VIR_CGROUP_CONTROLLER_CPUSET)) {
+ /*
+ * Configure the corresponding cpuset cgroup.
+ * If no cgroup for domain or hypervisor exists, do nothing.
+ */
+ if (virCgroupForDomain(driver->cgroup, vm->def->name,
+ &cgroup_dom, 0) == 0) {
+ if (virCgroupForHypervisor(cgroup_dom, &cgroup_hypervisor, 0) == 0) {
+ if (qemuSetupCgroupHypervisorPin(cgroup_hypervisor, vm->def) < 0) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("failed to set cpuset.cpus in cgroup"
+ " for hypervisor threads"));
+ goto cleanup;
+ }
+ }
+ }
+ }
+
+ if (canResetting) {
+ if (virDomainHypervisorPinDel(vm->def) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to delete hypervisorpin xml of "
+ "a running domain"));
+ goto cleanup;
+ }
+ }
+
+ if (virProcessInfoSetAffinity(pid, cpumap, maplen, maxcpu) < 0) {
+ virReportError(VIR_ERR_SYSTEM_ERROR, "%s",
+ _("failed to set cpu affinity for "
+ "hypervisor threads"));
+ goto cleanup;
+ }
+ } else {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("cpu affinity is not supported"));
+ goto cleanup;
+ }
+
+ if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
+ goto cleanup;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+
+ if (canResetting) {
+ if (virDomainHypervisorPinDel(persistentDef) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to delete hypervisorpin xml of "
+ "a persistent domain"));
+ goto cleanup;
+ }
+ } else {
+ if (virDomainHypervisorPinAdd(persistentDef, cpumap, maplen) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to update or add hypervisorpin xml "
+ "of a persistent domain"));
+ goto cleanup;
+ }
+ }
+
+ ret = virDomainSaveConfig(driver->configDir, persistentDef);
+ goto cleanup;
+ }
+
+ ret = 0;
+
+cleanup:
+ if (cgroup_hypervisor)
+ virCgroupFree(&cgroup_hypervisor);
+ if (cgroup_dom)
+ virCgroupFree(&cgroup_dom);
+
+ if (vm)
+ virDomainObjUnlock(vm);
+ return ret;
+}
+
+static int
+qemudDomainGetHypervisorPinInfo(virDomainPtr dom,
+ unsigned char *cpumaps,
+ int maplen,
+ unsigned int flags)
+{
+ struct qemud_driver *driver = dom->conn->privateData;
+ virDomainObjPtr vm = NULL;
+ virNodeInfo nodeinfo;
+ virDomainDefPtr targetDef = NULL;
+ int ret = -1;
+ int maxcpu, hostcpus, pcpu;
+ virDomainVcpuPinDefPtr hypervisorpin = NULL;
+ char *cpumask = NULL;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ qemuDriverUnlock(driver);
+
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(dom->uuid, uuidstr);
+ virReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"), uuidstr);
+ goto cleanup;
+ }
+
+ if (virDomainLiveConfigHelperMethod(driver->caps, vm, &flags,
+ &targetDef) < 0)
+ goto cleanup;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE)
+ targetDef = vm->def;
+
+ /* Coverity didn't realize that targetDef must be set if we got here. */
+ sa_assert(targetDef);
+
+ if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
+ goto cleanup;
+ hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
+ maxcpu = maplen * 8;
+ if (maxcpu > hostcpus)
+ maxcpu = hostcpus;
+
+ /* initialize cpumaps */
+ memset(cpumaps, 0xff, maplen);
+ if (maxcpu % 8) {
+ cpumaps[maplen - 1] &= (1 << maxcpu % 8) - 1;
+ }
+
+ /* If no hypervisorpin, all cpus should be used */
+ hypervisorpin = targetDef->cputune.hypervisorpin;
+ if (!hypervisorpin) {
+ ret = 0;
+ goto cleanup;
+ }
+
+ cpumask = hypervisorpin->cpumask;
+ for (pcpu = 0; pcpu < maxcpu; pcpu++) {
+ if (cpumask[pcpu] == 0)
+ VIR_UNUSE_CPU(cpumaps, pcpu);
+ }
+
+ ret = 1;
+
+cleanup:
+ if (vm)
+ virDomainObjUnlock(vm);
+ return ret;
+}
+
+static int
qemudDomainGetVcpus(virDomainPtr dom,
virVcpuInfoPtr info,
int maxinfo,
@@ -13249,6 +13470,8 @@ static virDriver qemuDriver = {
.domainPinVcpu = qemudDomainPinVcpu, /* 0.4.4 */
.domainPinVcpuFlags = qemudDomainPinVcpuFlags, /* 0.9.3 */
.domainGetVcpuPinInfo = qemudDomainGetVcpuPinInfo, /* 0.9.3 */
+ .domainPinHypervisorFlags = qemudDomainPinHypervisorFlags, /* 0.9.13 */
+ .domainGetHypervisorPinInfo = qemudDomainGetHypervisorPinInfo, /* 0.9.13 */
.domainGetVcpus = qemudDomainGetVcpus, /* 0.4.4 */
.domainGetMaxVcpus = qemudDomainGetMaxVcpus, /* 0.4.4 */
.domainGetSecurityLabel = qemudDomainGetSecurityLabel, /* 0.6.1 */
--
1.7.10.2