The nodeinfo is reporting incorrect number of cpus and incorrect host
topology on PPC64 KVM hosts. The KVM hypervisor on PPC64 needs only
the primary thread in a core to be online, and the secondaries offlined.
While scheduling a guest in, the kvm scheduler wakes up the secondaries to
run in guest context.
The host scheduling of the guests happens at the core level (as only the primary
thread is online). The kvm scheduler exploits as many threads of the core
as needed by guest. Further, starting POWER8, the processor allows splitting
a physical core into multiple subcores with 2 or 4 threads each. Again, only
the primary thread in a subcore is online in the host. The KVM-PPC
scheduler allows guests to exploit all the offline threads in the subcore,
by bringing them online when needed.
(Kernel patches on split-core
http://www.spinics.net/lists/kvm-ppc/msg09121.html)
Recent dynamic micro-threading changes in ppc-kvm ensure that all the
offline cpus are utilized across guests, even across guests with
different cpu topologies.
(
https://www.mail-archive.com/kvm@vger.kernel.org/msg115978.html)
Since the offline cpus are brought online in the guest context, it is safe
to count them as online. Nodeinfo today discounts these offline cpus from
cpu count/topology calculation, and the nodeinfo output is not of any help,
and the host appears overcommitted when it is actually not.
The patch carefully counts those offline threads whose primary threads are
online. The host topology displayed by the nodeinfo is also fixed when the
host is in valid kvm state.
Test results with and without fix:
http://ur1.ca/mx25y
Signed-off-by: Shivaprasad G Bhat <sbhat(a)linux.vnet.ibm.com>
---
src/libvirt_private.syms | 1 +
src/nodeinfo.c | 87 ++++++++++++++++++++++++++++++++++++++++------
src/util/virarch.c | 35 +++++++++++++++++++
src/util/virarch.h | 2 +
4 files changed, 114 insertions(+), 11 deletions(-)
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 1566d11..071fe6e 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1075,6 +1075,7 @@ virArchFromHost;
virArchFromString;
virArchGetEndian;
virArchGetWordSize;
+virArchPPCGetKvmHostCoreMode;
virArchToString;
diff --git a/src/nodeinfo.c b/src/nodeinfo.c
index 2fafe2d..9d1dc84 100644
--- a/src/nodeinfo.c
+++ b/src/nodeinfo.c
@@ -428,25 +428,77 @@ virNodeParseNode(const char *node,
unsigned int cpu;
int online;
int direrr;
+ int lastonline;
+ virBitmapPtr nodecpus = NULL;
+ int threads_per_subcore = 1;
+ bool valid_ppc64_kvmhost_mode = false;
*threads = 0;
*cores = 0;
*sockets = 0;
+ if (ARCH_IS_PPC64(arch)) {
+ /* PPC-KVM needs the secondary threads of a core to be offline on the
+ * host. The kvm scheduler brings the secondary threads online in the
+ * guest context. Moreover, P8 processor has split-core capability
+ * where, there can be 1,2 or 4 subcores per core. The primaries of the
+ * subcores alone will be online on the host for a subcore in the
+ * host. Even though the actual threads per core for P8 processor is 8,
+ * depending on the subcores_per_core = 1, 2 or 4, the threads per
+ * subcore will vary accordingly to 8, 4 and 2 respectively.
+ * So, On host threads_per_core what is arrived at from sysfs in the
+ * current logic is actually the subcores_per_core. Threads per subcore
+ * can only be obtained from the kvm device. For example, on P8 with 1
+ * core having 8 threads, subcores_per_core=4, the threads 0,2,4 & 6
+ * will be online. The sysfs reflects this and in the current logic
+ * variable 'threads' will be 4 which is nothing but subcores_per_core.
+ * If the user tampers with the cpu online/offline states using chcpu or other
+ * means, then it is an unsupported configuration for kvm.
+ * The code below tries to keep in mind
+ * - when the libvirtd is run inside a KVM guest or Phyp based guest.
+ * - Or on the kvm host where the user manually tampers with the cpu states to
+ * offline/online randomly.
+ */
+ valid_ppc64_kvmhost_mode =
virArchPPCGetKvmHostCoreMode(&threads_per_subcore);
+ }
+
+ nodecpus = virBitmapNew(VIR_DOMAIN_CPUMASK_LEN);
+
if (!(cpudir = opendir(node))) {
virReportSystemError(errno, _("cannot opendir %s"), node);
goto cleanup;
}
- /* enumerate sockets in the node */
- CPU_ZERO(&sock_map);
while ((direrr = virDirRead(cpudir, &cpudirent, node)) > 0) {
if (sscanf(cpudirent->d_name, "cpu%u", &cpu) != 1)
continue;
+ if (virBitmapSetBit(nodecpus, cpu))
+ goto cleanup;
+ }
+
+ if (direrr < 0)
+ goto cleanup;
+
+ /* enumerate sockets in the node */
+ CPU_ZERO(&sock_map);
+ lastonline = -1;
+ for (cpu = 0; cpu < virBitmapSize(nodecpus); cpu++) {
+ if (!virBitmapIsBitSet(nodecpus, cpu))
+ continue;
+
if ((online = virNodeGetCpuValue(node, cpu, "online", 1)) < 0)
goto cleanup;
+ if (online && valid_ppc64_kvmhost_mode) {
+ if (lastonline != -1 &&
+ (cpu - lastonline) < threads_per_subcore) {
+ /* if any of secondaries online */
+ valid_ppc64_kvmhost_mode = false;
+ }
+ lastonline = cpu;
+ }
+
if (!online)
continue;
@@ -459,9 +511,6 @@ virNodeParseNode(const char *node,
sock_max = sock;
}
- if (direrr < 0)
- goto cleanup;
-
sock_max++;
/* allocate cpu maps for each socket */
@@ -472,14 +521,25 @@ virNodeParseNode(const char *node,
CPU_ZERO(&core_maps[i]);
/* iterate over all CPU's in the node */
- rewinddir(cpudir);
- while ((direrr = virDirRead(cpudir, &cpudirent, node)) > 0) {
- if (sscanf(cpudirent->d_name, "cpu%u", &cpu) != 1)
+ lastonline = -1;
+ for (cpu = 0; cpu < virBitmapSize(nodecpus); cpu++) {
+ if (!virBitmapIsBitSet(nodecpus, cpu))
continue;
if ((online = virNodeGetCpuValue(node, cpu, "online", 1)) < 0)
goto cleanup;
+ if (valid_ppc64_kvmhost_mode) {
+ if (online) {
+ lastonline = cpu;
+ } else if (lastonline != -1 &&
+ (cpu-lastonline) < threads_per_subcore) {
+ processors++; /* Count only those secondaries whose primary
+ subcore is online */
+ continue;
+ }
+ }
+
if (!online) {
(*offline)++;
continue;
@@ -513,9 +573,6 @@ virNodeParseNode(const char *node,
*threads = siblings;
}
- if (direrr < 0)
- goto cleanup;
-
/* finalize the returned data */
*sockets = CPU_COUNT(&sock_map);
@@ -528,6 +585,13 @@ virNodeParseNode(const char *node,
*cores = core;
}
+ if (valid_ppc64_kvmhost_mode) {
+ /* The actual host threads per core is
+ * multiple of the subcores_per_core(i.e variable *threads in this case)
+ * and threads_per_subcore.
+ */
+ *threads = *threads * threads_per_subcore;
+ }
ret = processors;
cleanup:
@@ -537,6 +601,7 @@ virNodeParseNode(const char *node,
ret = -1;
}
VIR_FREE(core_maps);
+ virBitmapFree(nodecpus);
return ret;
}
diff --git a/src/util/virarch.c b/src/util/virarch.c
index be48bcf..3717de0 100644
--- a/src/util/virarch.c
+++ b/src/util/virarch.c
@@ -25,8 +25,16 @@
#include "virlog.h"
#include "virarch.h"
+#include "virfile.h"
#include "verify.h"
+#include <fcntl.h>
+#include <sys/ioctl.h>
+
+#if HAVE_LINUX_KVM_H
+# include <linux/kvm.h>
+#endif
+
VIR_LOG_INIT("util.arch");
/* The canonical names are used in XML documents. ie ABI sensitive */
@@ -182,3 +190,30 @@ virArch virArchFromHost(void)
return arch;
}
+
+bool virArchPPCGetKvmHostCoreMode(int *threads_per_subcore)
+{
+ int kvmfd;
+ bool valid_ppc64_kvmhost_mode = false;
+
+#if HAVE_LINUX_KVM_H
+ kvmfd = open("/dev/kvm", O_RDONLY);
+ if (kvmfd >= 0) {
+# ifdef KVM_CAP_PPC_SMT
+
+ valid_ppc64_kvmhost_mode = true;
+ /* For Phyp and KVM based guests the ioctl for KVM_CAP_PPC_SMT
+ * returns zero and both primary and secondary threads will be
+ * online.
+ */
+ *threads_per_subcore = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
+ if (*threads_per_subcore == 0)
+ valid_ppc64_kvmhost_mode = false;
+
+# endif
+ }
+ VIR_FORCE_CLOSE(kvmfd);
+#endif
+
+ return valid_ppc64_kvmhost_mode;
+}
diff --git a/src/util/virarch.h b/src/util/virarch.h
index 3206ce2..6e196ff 100644
--- a/src/util/virarch.h
+++ b/src/util/virarch.h
@@ -102,4 +102,6 @@ virArch virArchFromString(const char *name);
virArch virArchFromHost(void);
+bool virArchPPCGetKvmHostCoreMode(int *threads_per_subcore);
+
#endif /* __VIR_ARCH_H__ */