Until now the map was loaded from the XML definition file every time an
operation on the flags was requested. With the introduction of one-shot
initializers we can store the definition forever (as it will never
change) instead of parsing it over and over again.
---
Notes:
Version 2:
- kept the map loading function separate so that tests can use it in the future
src/cpu/cpu_x86.c | 67 +++++++++++++++++++++++++++++++++----------------------
1 file changed, 40 insertions(+), 27 deletions(-)
diff --git a/src/cpu/cpu_x86.c b/src/cpu/cpu_x86.c
index 1b1f2b4..ba6a2b0 100644
--- a/src/cpu/cpu_x86.c
+++ b/src/cpu/cpu_x86.c
@@ -70,6 +70,10 @@ struct x86_map {
struct x86_model *models;
};
+static struct x86_map* virCPUx86Map = NULL;
+int virCPUx86MapOnceInit(void);
+VIR_ONCE_GLOBAL_INIT(virCPUx86Map);
+
enum compare_result {
SUBSET,
@@ -1065,7 +1069,7 @@ x86MapLoadCallback(enum cpuMapElement element,
static struct x86_map *
-x86LoadMap(void)
+virCPUx86LoadMap(void)
{
struct x86_map *map;
@@ -1083,6 +1087,26 @@ error:
}
+int
+virCPUx86MapOnceInit(void)
+{
+ if (!(virCPUx86Map = virCPUx86LoadMap()))
+ return -1;
+
+ return 0;
+}
+
+
+static const struct x86_map *
+virCPUx86GetMap(void)
+{
+ if (virCPUx86MapInitialize() < 0)
+ return NULL;
+
+ return virCPUx86Map;
+}
+
+
static char *
x86CPUDataFormat(const virCPUData *data)
{
@@ -1194,7 +1218,7 @@ x86Compute(virCPUDefPtr host,
virCPUDataPtr *guest,
char **message)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
struct x86_model *host_model = NULL;
struct x86_model *cpu_force = NULL;
struct x86_model *cpu_require = NULL;
@@ -1247,7 +1271,7 @@ x86Compute(virCPUDefPtr host,
return VIR_CPU_COMPARE_INCOMPATIBLE;
}
- if (!(map = x86LoadMap()) ||
+ if (!(map = virCPUx86GetMap()) ||
!(host_model = x86ModelFromCPU(host, map, VIR_CPU_FEATURE_REQUIRE)) ||
!(cpu_force = x86ModelFromCPU(cpu, map, VIR_CPU_FEATURE_FORCE)) ||
!(cpu_require = x86ModelFromCPU(cpu, map, VIR_CPU_FEATURE_REQUIRE)) ||
@@ -1323,7 +1347,6 @@ x86Compute(virCPUDefPtr host,
}
cleanup:
- x86MapFree(map);
x86ModelFree(host_model);
x86ModelFree(diff);
x86ModelFree(cpu_force);
@@ -1362,7 +1385,7 @@ x86GuestData(virCPUDefPtr host,
static int
x86AddFeatures(virCPUDefPtr cpu,
- struct x86_map *map)
+ const struct x86_map *map)
{
const struct x86_model *candidate;
const struct x86_feature *feature = map->features;
@@ -1398,7 +1421,7 @@ x86Decode(virCPUDefPtr cpu,
unsigned int flags)
{
int ret = -1;
- struct x86_map *map;
+ const struct x86_map *map;
const struct x86_model *candidate;
virCPUDefPtr cpuCandidate;
virCPUDefPtr cpuModel = NULL;
@@ -1406,7 +1429,7 @@ x86Decode(virCPUDefPtr cpu,
virCheckFlags(VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES, -1);
- if (data == NULL || (map = x86LoadMap()) == NULL)
+ if (!data || !(map = virCPUx86GetMap()))
return -1;
candidate = map->models;
@@ -1490,7 +1513,6 @@ x86Decode(virCPUDefPtr cpu,
ret = 0;
out:
- x86MapFree(map);
virCPUDefFree(cpuModel);
return ret;
@@ -1537,14 +1559,13 @@ x86Encode(virArch arch,
virCPUDataPtr *forbidden,
virCPUDataPtr *vendor)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
virCPUx86Data *data_forced = NULL;
virCPUx86Data *data_required = NULL;
virCPUx86Data *data_optional = NULL;
virCPUx86Data *data_disabled = NULL;
virCPUx86Data *data_forbidden = NULL;
virCPUx86Data *data_vendor = NULL;
- int ret = -1;
if (forced)
*forced = NULL;
@@ -1559,7 +1580,7 @@ x86Encode(virArch arch,
if (vendor)
*vendor = NULL;
- if ((map = x86LoadMap()) == NULL)
+ if ((map = virCPUx86GetMap()) == NULL)
goto error;
if (forced) {
@@ -1627,12 +1648,7 @@ x86Encode(virArch arch,
!(*vendor = virCPUx86MakeData(arch, &data_vendor)))
goto error;
- ret = 0;
-
-cleanup:
- x86MapFree(map);
-
- return ret;
+ return 0;
error:
virCPUx86DataFree(data_forced);
@@ -1653,7 +1669,7 @@ error:
x86FreeCPUData(*forbidden);
if (vendor)
x86FreeCPUData(*vendor);
- goto cleanup;
+ return -1;
}
@@ -1748,7 +1764,7 @@ x86Baseline(virCPUDefPtr *cpus,
unsigned int nmodels,
unsigned int flags)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
struct x86_model *base_model = NULL;
virCPUDefPtr cpu = NULL;
size_t i;
@@ -1756,7 +1772,7 @@ x86Baseline(virCPUDefPtr *cpus,
struct x86_model *model = NULL;
bool outputVendor = true;
- if (!(map = x86LoadMap()))
+ if (!(map = virCPUx86GetMap()))
goto error;
if (!(base_model = x86ModelFromCPU(cpus[0], map, VIR_CPU_FEATURE_REQUIRE)))
@@ -1837,7 +1853,6 @@ x86Baseline(virCPUDefPtr *cpus,
cleanup:
x86ModelFree(base_model);
- x86MapFree(map);
return cpu;
@@ -1855,10 +1870,10 @@ x86UpdateCustom(virCPUDefPtr guest,
{
int ret = -1;
size_t i;
- struct x86_map *map;
+ const struct x86_map *map;
struct x86_model *host_model = NULL;
- if (!(map = x86LoadMap()) ||
+ if (!(map = virCPUx86GetMap()) ||
!(host_model = x86ModelFromCPU(host, map, VIR_CPU_FEATURE_REQUIRE)))
goto cleanup;
@@ -1890,7 +1905,6 @@ x86UpdateCustom(virCPUDefPtr guest,
ret = 0;
cleanup:
- x86MapFree(map);
x86ModelFree(host_model);
return ret;
}
@@ -1960,11 +1974,11 @@ static int
x86HasFeature(const virCPUData *data,
const char *name)
{
- struct x86_map *map;
+ const struct x86_map *map;
struct x86_feature *feature;
int ret = -1;
- if (!(map = x86LoadMap()))
+ if (!(map = virCPUx86GetMap()))
return -1;
if (!(feature = x86FeatureFind(map, name)))
@@ -1973,7 +1987,6 @@ x86HasFeature(const virCPUData *data,
ret = x86DataIsSubset(data->data.x86, feature->data) ? 1 : 0;
cleanup:
- x86MapFree(map);
return ret;
}
--
1.8.3.2