On Wed, Jun 08, 2016 at 14:41:38 +0200, Jiri Denemark wrote:
Our current detection code uses just the number of CPU features
which
need to be added/removed from the CPU model to fully describe the CPUID
data. The smallest number wins. But this may sometimes generate wrong
results as one can see from the fixed test cases. This patch modifies
the algorithm to prefer the CPU model with matching signature even if
this model results in a longer list of additional features.
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
---
src/cpu/cpu_map.xml | 16 +++
src/cpu/cpu_x86.c | 146 +++++++++++++++++++--
.../cputestdata/x86-cpuid-Core-i7-5600U-guest.xml | 11 +-
tests/cputestdata/x86-cpuid-Core2-E6850-guest.xml | 5 +-
tests/cputestdata/x86-cpuid-Core2-E6850-host.xml | 5 +-
tests/cputestdata/x86-cpuid-Xeon-5110-guest.xml | 5 +-
tests/cputestdata/x86-cpuid-Xeon-5110-host.xml | 5 +-
tests/cputestdata/x86-host+penryn-force-result.xml | 4 +-
8 files changed, 174 insertions(+), 23 deletions(-)
[...]
diff --git a/src/cpu/cpu_x86.c b/src/cpu/cpu_x86.c
index 6823a27..b1a4b93 100644
--- a/src/cpu/cpu_x86.c
+++ b/src/cpu/cpu_x86.c
@@ -493,6 +494,75 @@ x86DataToVendor(const virCPUx86Data *data,
}
+static uint32_t
+x86MakeSignature(unsigned int family,
+ unsigned int model)
+{
+ uint32_t sig = 0;
+
+ /*
+ * CPU signature (eax from 0x1 CPUID leaf):
+ *
+ * |31 .. 28|27 .. 20|19 .. 16|15 .. 14|13 .. 12|11 .. 8|7 .. 4|3 .. 0|
+ * | R | extFam | extMod | R | PType | Fam | Mod | Step |
+ *
+ * R reserved
+ * extFam extended family (valid only if Fam == 0xf)
+ * extMod extended model
+ * PType processor type
+ * Fam family
+ * Mod model
+ * Step stepping
+ *
+ * family = eax[27:20] + eax[11:8]
+ * model = eax[19:16] << 4 + eax[7:4]
+ */
+
+ /* extFam */
+ if (family > 0xf) {
+ sig |= (family - 0xf) << 20;
+ family = 0xf;
+ }
+
+ /* extMod */
+ sig |= (model >> 4) << 16;
+
+ /* Fam */
+ sig |= family << 8;
+
+ /* Mod */
+ sig |= (model & 0xf) << 4;
+
+ return sig;
+}
+
+
+/* Mask out irrelevant bits (R and Step) from processor signature. */
+#define SIGNATURE_MASK 0x0fff3ff0
This mask does not clear bits 12 and 13, which correspond to the processor
type, even though x86MakeSignature never sets them and we don't take them
into account anywhere in our code. If keeping the PType bits in the mask is
intentional, please note that in the comment above SIGNATURE_MASK.
+
+static uint32_t
+x86DataToSignature(const virCPUx86Data *data)
+{
+ virCPUx86CPUID leaf1 = { .eax_in = 0x1 };
+ virCPUx86CPUID *cpuid;
+
+ if (!(cpuid = x86DataCpuid(data, &leaf1)))
+ return 0;
+
+ return cpuid->eax & SIGNATURE_MASK;
+}
+
+
@@ -1093,6 +1164,34 @@ x86ModelParse(xmlXPathContextPtr ctxt,
goto error;
}
+ if (virXPathBoolean("boolean(./signature)", ctxt)) {
+ unsigned int sigFamily = 0;
+ unsigned int sigModel = 0;
+ int rc;
+
+ rc = virXPathUInt("string(./signature/@family)", ctxt,
&sigFamily);
+ if (rc < 0) {
+ if (rc == -2 || (rc == 0 && !sigFamily)) {
The second part of the condition is a contradiction: this branch is only
entered when rc < 0, so rc can never be 0 here and the
"(rc == 0 && !sigFamily)" check is dead code. Could you please clarify
your intent here?
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Invalid CPU family in model %s"),
+ model->name);
+ }
+ goto cleanup;
+ }
+
+ rc = virXPathUInt("string(./signature/@model)", ctxt, &sigModel);
+ if (rc < 0) {
+ if (rc == -2 || (rc == 0 && !sigModel)) {
Same here: "rc == 0" is unreachable inside the "rc < 0" branch, so the
"(rc == 0 && !sigModel)" check is dead code.
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Invalid CPU model in model %s"),
+ model->name);
+ }
+ goto cleanup;
+ }
+
+ model->signature = x86MakeSignature(sigFamily, sigModel);
+ }
+
if (virXPathBoolean("boolean(./vendor)", ctxt)) {
vendor = virXPathString("string(./vendor/@name)", ctxt);
if (!vendor) {
I'd like to see the points above clarified before I can approve this patch.