Until now the map was loaded from the XML definition file every time an
operation on the flags was requested. With the introduction of one-shot
initializers we can store the definition forever (as it will never
change) instead of parsing it over and over again.
---
src/cpu/cpu_x86.c | 67 ++++++++++++++++++++++++++++++-------------------------
1 file changed, 36 insertions(+), 31 deletions(-)
diff --git a/src/cpu/cpu_x86.c b/src/cpu/cpu_x86.c
index 6260a95..6ce5984 100644
--- a/src/cpu/cpu_x86.c
+++ b/src/cpu/cpu_x86.c
@@ -70,6 +70,10 @@ struct x86_map {
struct x86_model *models;
};
+static struct x86_map* virCPUx86Map = NULL;
+int virCPUx86MapOnceInit(void);
+VIR_ONCE_GLOBAL_INIT(virCPUx86Map);
+
enum compare_result {
SUBSET,
@@ -1064,22 +1068,34 @@ x86MapLoadCallback(enum cpuMapElement element,
}
-static struct x86_map *
-x86LoadMap(void)
+int
+virCPUx86MapOnceInit(void)
{
struct x86_map *map;
if (VIR_ALLOC(map) < 0)
- return NULL;
+ return -1;
if (cpuMapLoad("x86", x86MapLoadCallback, map) < 0)
goto error;
- return map;
+ virCPUx86Map = map;
+
+ return 0;
error:
x86MapFree(map);
- return NULL;
+ return -1;
+}
+
+
+static const struct x86_map *
+virCPUx86GetMap(void)
+{
+ if (virCPUx86MapInitialize() < 0)
+ return NULL;
+
+ return virCPUx86Map;
}
@@ -1194,7 +1210,7 @@ x86Compute(virCPUDefPtr host,
virCPUDataPtr *guest,
char **message)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
struct x86_model *host_model = NULL;
struct x86_model *cpu_force = NULL;
struct x86_model *cpu_require = NULL;
@@ -1247,7 +1263,7 @@ x86Compute(virCPUDefPtr host,
return VIR_CPU_COMPARE_INCOMPATIBLE;
}
- if (!(map = x86LoadMap()) ||
+ if (!(map = virCPUx86GetMap()) ||
!(host_model = x86ModelFromCPU(host, map, VIR_CPU_FEATURE_REQUIRE)) ||
!(cpu_force = x86ModelFromCPU(cpu, map, VIR_CPU_FEATURE_FORCE)) ||
!(cpu_require = x86ModelFromCPU(cpu, map, VIR_CPU_FEATURE_REQUIRE)) ||
@@ -1323,7 +1339,6 @@ x86Compute(virCPUDefPtr host,
}
cleanup:
- x86MapFree(map);
x86ModelFree(host_model);
x86ModelFree(diff);
x86ModelFree(cpu_force);
@@ -1362,7 +1377,7 @@ x86GuestData(virCPUDefPtr host,
static int
x86AddFeatures(virCPUDefPtr cpu,
- struct x86_map *map)
+ const struct x86_map *map)
{
const struct x86_model *candidate;
const struct x86_feature *feature = map->features;
@@ -1398,7 +1413,7 @@ x86Decode(virCPUDefPtr cpu,
unsigned int flags)
{
int ret = -1;
- struct x86_map *map;
+ const struct x86_map *map;
const struct x86_model *candidate;
virCPUDefPtr cpuCandidate;
virCPUDefPtr cpuModel = NULL;
@@ -1406,7 +1421,7 @@ x86Decode(virCPUDefPtr cpu,
virCheckFlags(VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES, -1);
- if (data == NULL || (map = x86LoadMap()) == NULL)
+ if (!data || !(map = virCPUx86GetMap()))
return -1;
candidate = map->models;
@@ -1490,7 +1505,6 @@ x86Decode(virCPUDefPtr cpu,
ret = 0;
out:
- x86MapFree(map);
virCPUDefFree(cpuModel);
return ret;
@@ -1537,14 +1551,13 @@ x86Encode(virArch arch,
virCPUDataPtr *forbidden,
virCPUDataPtr *vendor)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
virCPUx86Data *data_forced = NULL;
virCPUx86Data *data_required = NULL;
virCPUx86Data *data_optional = NULL;
virCPUx86Data *data_disabled = NULL;
virCPUx86Data *data_forbidden = NULL;
virCPUx86Data *data_vendor = NULL;
- int ret = -1;
if (forced)
*forced = NULL;
@@ -1559,7 +1572,7 @@ x86Encode(virArch arch,
if (vendor)
*vendor = NULL;
- if ((map = x86LoadMap()) == NULL)
+ if ((map = virCPUx86GetMap()) == NULL)
goto error;
if (forced) {
@@ -1627,12 +1640,7 @@ x86Encode(virArch arch,
!(*vendor = virCPUx86MakeData(arch, &data_vendor)))
goto error;
- ret = 0;
-
-cleanup:
- x86MapFree(map);
-
- return ret;
+ return 0;
error:
virCPUx86DataFree(data_forced);
@@ -1653,7 +1661,7 @@ error:
x86FreeCPUData(*forbidden);
if (vendor)
x86FreeCPUData(*vendor);
- goto cleanup;
+ return -1;
}
@@ -1748,7 +1756,7 @@ x86Baseline(virCPUDefPtr *cpus,
unsigned int nmodels,
unsigned int flags)
{
- struct x86_map *map = NULL;
+ const struct x86_map *map = NULL;
struct x86_model *base_model = NULL;
virCPUDefPtr cpu = NULL;
size_t i;
@@ -1756,7 +1764,7 @@ x86Baseline(virCPUDefPtr *cpus,
struct x86_model *model = NULL;
bool outputVendor = true;
- if (!(map = x86LoadMap()))
+ if (!(map = virCPUx86GetMap()))
goto error;
if (!(base_model = x86ModelFromCPU(cpus[0], map, VIR_CPU_FEATURE_REQUIRE)))
@@ -1837,7 +1845,6 @@ x86Baseline(virCPUDefPtr *cpus,
cleanup:
x86ModelFree(base_model);
- x86MapFree(map);
return cpu;
@@ -1855,10 +1862,10 @@ x86UpdateCustom(virCPUDefPtr guest,
{
int ret = -1;
size_t i;
- struct x86_map *map;
+ const struct x86_map *map;
struct x86_model *host_model = NULL;
- if (!(map = x86LoadMap()) ||
+ if (!(map = virCPUx86GetMap()) ||
!(host_model = x86ModelFromCPU(host, map, VIR_CPU_FEATURE_REQUIRE)))
goto cleanup;
@@ -1890,7 +1897,6 @@ x86UpdateCustom(virCPUDefPtr guest,
ret = 0;
cleanup:
- x86MapFree(map);
x86ModelFree(host_model);
return ret;
}
@@ -1960,11 +1966,11 @@ static int
x86HasFeature(const virCPUDataPtr data,
const char *name)
{
- struct x86_map *map;
+ const struct x86_map *map;
struct x86_feature *feature;
int ret = -1;
- if (!(map = x86LoadMap()))
+ if (!(map = virCPUx86GetMap()))
return -1;
if (!(feature = x86FeatureFind(map, name)))
@@ -1973,7 +1979,6 @@ x86HasFeature(const virCPUDataPtr data,
ret = x86DataIsSubset(data->data.x86, feature->data) ? 1 : 0;
cleanup:
- x86MapFree(map);
return ret;
}
--
1.8.3.2