Devel
February 2017
- 71 participants
- 225 discussions
Now that we have some qemuSecurity wrappers over the
virSecurityManager APIs, let's make sure everybody sticks with
them. We have them for a reason: calling a virSecurityManager
API directly instead of its wrapper may lead to accidentally
labelling a file on the host instead of in the namespace.
Signed-off-by: Michal Privoznik <mprivozn(a)redhat.com>
---
This is an alternative approach to:
https://www.redhat.com/archives/libvir-list/2017-February/msg00271.html
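As a quick illustration of what the new cfg.mk syntax-check rule enforces
(a hypothetical fragment, not part of this patch): within src/qemu/ a bare
virSecurityManager call is now flagged, while the qemuSecurity wrapper and
the virSecurityManagerPtr type name remain acceptable.

    /* Hypothetical fragment for illustration only. */
    virSecurityManagerPtr mgr = driver->securityManager;     /* type name: allowed */

    if (virSecurityManagerSetSocketLabel(mgr, vm->def) < 0)  /* direct call: flagged */
        goto cleanup;

    if (qemuSecuritySetSocketLabel(mgr, vm->def) < 0)        /* wrapper: passes the check */
        goto cleanup;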
cfg.mk | 5 ++++
src/qemu/qemu_command.c | 7 +++---
src/qemu/qemu_conf.c | 9 ++++---
src/qemu/qemu_domain.c | 17 ++++++-------
src/qemu/qemu_driver.c | 63 ++++++++++++++++++++++-------------------------
src/qemu/qemu_hotplug.c | 4 +--
src/qemu/qemu_migration.c | 13 +++++-----
src/qemu/qemu_process.c | 61 ++++++++++++++++++++++-----------------------
src/qemu/qemu_security.h | 32 ++++++++++++++++++++++++
9 files changed, 122 insertions(+), 89 deletions(-)
diff --git a/cfg.mk b/cfg.mk
index 69e3f3a1a..489fda8ea 100644
--- a/cfg.mk
+++ b/cfg.mk
@@ -983,6 +983,11 @@ sc_prohibit_sysconf_pagesize:
halt='use virGetSystemPageSize[KB] instead of sysconf(_SC_PAGESIZE)' \
$(_sc_search_regexp)
+sc_prohibit_virSecurity:
+ @grep -P 'virSecurityManager(?!Ptr)' $$($(VC_LIST_EXCEPT) | grep '^src/qemu/' | \
+ grep -v '^src/qemu/qemu_security') && \
+ { echo '$(ME): prefer qemuSecurity wrappers' 1>&2; exit 1; } || :
+
sc_prohibit_pthread_create:
@prohibit='\bpthread_create\b' \
exclude='sc_prohibit_pthread_create' \
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index c00a47a91..110540ba7 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -28,6 +28,7 @@
#include "qemu_capabilities.h"
#include "qemu_interface.h"
#include "qemu_alias.h"
+#include "qemu_security.h"
#include "cpu/cpu.h"
#include "dirname.h"
#include "viralloc.h"
@@ -8321,8 +8322,8 @@ qemuBuildInterfaceCommandLine(virQEMUDriverPtr driver,
}
for (i = 0; i < tapfdSize; i++) {
- if (virSecurityManagerSetTapFDLabel(driver->securityManager,
- def, tapfd[i]) < 0)
+ if (qemuSecuritySetTapFDLabel(driver->securityManager,
+ def, tapfd[i]) < 0)
goto cleanup;
virCommandPassFD(cmd, tapfd[i],
VIR_COMMAND_PASS_FD_CLOSE_PARENT);
@@ -8403,7 +8404,7 @@ qemuBuildInterfaceCommandLine(virQEMUDriverPtr driver,
/* NOTE: Not using const virDomainDef here since eventually a call is made
- * into virSecurityManagerSetTapFDLabel which calls it's driver
+ * into qemuSecuritySetTapFDLabel which calls it's driver
* API domainSetSecurityTapFDLabel that doesn't use the const format.
*/
static int
diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index 0223a95d2..4fc0dee39 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -38,6 +38,7 @@
#include "qemu_conf.h"
#include "qemu_capabilities.h"
#include "qemu_domain.h"
+#include "qemu_security.h"
#include "viruuid.h"
#include "virbuffer.h"
#include "virconf.h"
@@ -904,7 +905,7 @@ virCapsPtr virQEMUDriverCreateCapabilities(virQEMUDriverPtr driver)
}
/* access sec drivers and create a sec model for each one */
- if (!(sec_managers = virSecurityManagerGetNested(driver->securityManager)))
+ if (!(sec_managers = qemuSecurityGetNested(driver->securityManager)))
goto error;
/* calculate length */
@@ -917,14 +918,14 @@ virCapsPtr virQEMUDriverCreateCapabilities(virQEMUDriverPtr driver)
for (i = 0; sec_managers[i]; i++) {
virCapsHostSecModelPtr sm = &caps->host.secModels[i];
- doi = virSecurityManagerGetDOI(sec_managers[i]);
- model = virSecurityManagerGetModel(sec_managers[i]);
+ doi = qemuSecurityGetDOI(sec_managers[i]);
+ model = qemuSecurityGetModel(sec_managers[i]);
if (VIR_STRDUP(sm->model, model) < 0 ||
VIR_STRDUP(sm->doi, doi) < 0)
goto error;
for (j = 0; j < ARRAY_CARDINALITY(virtTypes); j++) {
- lbl = virSecurityManagerGetBaseLabel(sec_managers[i], virtTypes[j]);
+ lbl = qemuSecurityGetBaseLabel(sec_managers[i], virtTypes[j]);
type = virDomainVirtTypeToString(virtTypes[j]);
if (lbl &&
virCapabilitiesHostSecModelAddBaseLabel(sm, type, lbl) < 0)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index f62bf8f1d..2c827ea2c 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -588,8 +588,8 @@ qemuDomainWriteMasterKeyFile(virQEMUDriverPtr driver,
goto cleanup;
}
- if (virSecurityManagerDomainSetPathLabel(driver->securityManager,
- vm->def, path) < 0)
+ if (qemuSecurityDomainSetPathLabel(driver->securityManager,
+ vm->def, path) < 0)
goto cleanup;
ret = 0;
@@ -2688,7 +2688,7 @@ qemuDomainDefPostParse(virDomainDefPtr def,
if (qemuDomainRecheckInternalPaths(def, cfg, parseFlags) < 0)
goto cleanup;
- if (virSecurityManagerVerify(driver->securityManager, def) < 0)
+ if (qemuSecurityVerify(driver->securityManager, def) < 0)
goto cleanup;
if (qemuDomainDefVcpusPostParse(def) < 0)
@@ -7257,8 +7257,7 @@ qemuDomainSetupDev(virQEMUDriverPtr driver,
VIR_DEBUG("Setting up /dev/ for domain %s", vm->def->name);
- mount_options = virSecurityManagerGetMountOptions(driver->securityManager,
- vm->def);
+ mount_options = qemuSecurityGetMountOptions(driver->securityManager, vm->def);
if (!mount_options &&
VIR_STRDUP(mount_options, "") < 0)
@@ -7679,7 +7678,7 @@ qemuDomainAttachDeviceMknodHelper(pid_t pid ATTRIBUTE_UNUSED,
bool delDevice = false;
bool isLink = S_ISLNK(data->sb.st_mode);
- virSecurityManagerPostFork(data->driver->securityManager);
+ qemuSecurityPostFork(data->driver->securityManager);
if (virFileMakeParentPath(data->file) < 0) {
virReportSystemError(errno,
@@ -7841,16 +7840,16 @@ qemuDomainAttachDeviceMknodRecursive(virQEMUDriverPtr driver,
#endif
if (STRPREFIX(file, DEVPREFIX)) {
- if (virSecurityManagerPreFork(driver->securityManager) < 0)
+ if (qemuSecurityPreFork(driver->securityManager) < 0)
goto cleanup;
if (virProcessRunInMountNamespace(vm->pid,
qemuDomainAttachDeviceMknodHelper,
&data) < 0) {
- virSecurityManagerPostFork(driver->securityManager);
+ qemuSecurityPostFork(driver->securityManager);
goto cleanup;
}
- virSecurityManagerPostFork(driver->securityManager);
+ qemuSecurityPostFork(driver->securityManager);
}
if (isLink &&
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 89bc833de..096fe36fe 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -405,26 +405,26 @@ qemuSecurityInit(virQEMUDriverPtr driver)
cfg->securityDriverNames[0]) {
names = cfg->securityDriverNames;
while (names && *names) {
- if (!(mgr = virSecurityManagerNew(*names,
- QEMU_DRIVER_NAME,
- flags)))
+ if (!(mgr = qemuSecurityNew(*names,
+ QEMU_DRIVER_NAME,
+ flags)))
goto error;
if (!stack) {
- if (!(stack = virSecurityManagerNewStack(mgr)))
+ if (!(stack = qemuSecurityNewStack(mgr)))
goto error;
} else {
- if (virSecurityManagerStackAddNested(stack, mgr) < 0)
+ if (qemuSecurityStackAddNested(stack, mgr) < 0)
goto error;
}
mgr = NULL;
names++;
}
} else {
- if (!(mgr = virSecurityManagerNew(NULL,
- QEMU_DRIVER_NAME,
- flags)))
+ if (!(mgr = qemuSecurityNew(NULL,
+ QEMU_DRIVER_NAME,
+ flags)))
goto error;
- if (!(stack = virSecurityManagerNewStack(mgr)))
+ if (!(stack = qemuSecurityNewStack(mgr)))
goto error;
mgr = NULL;
}
@@ -432,17 +432,17 @@ qemuSecurityInit(virQEMUDriverPtr driver)
if (virQEMUDriverIsPrivileged(driver)) {
if (cfg->dynamicOwnership)
flags |= VIR_SECURITY_MANAGER_DYNAMIC_OWNERSHIP;
- if (!(mgr = virSecurityManagerNewDAC(QEMU_DRIVER_NAME,
- cfg->user,
- cfg->group,
- flags,
- qemuSecurityChownCallback)))
+ if (!(mgr = qemuSecurityNewDAC(QEMU_DRIVER_NAME,
+ cfg->user,
+ cfg->group,
+ flags,
+ qemuSecurityChownCallback)))
goto error;
if (!stack) {
- if (!(stack = virSecurityManagerNewStack(mgr)))
+ if (!(stack = qemuSecurityNewStack(mgr)))
goto error;
} else {
- if (virSecurityManagerStackAddNested(stack, mgr) < 0)
+ if (qemuSecurityStackAddNested(stack, mgr) < 0)
goto error;
}
mgr = NULL;
@@ -3088,7 +3088,7 @@ qemuDomainSaveMemory(virQEMUDriverPtr driver,
if (fd < 0)
goto cleanup;
- if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
+ if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
goto cleanup;
if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags)))
@@ -3553,8 +3553,7 @@ static int qemuDumpToFd(virQEMUDriverPtr driver, virDomainObjPtr vm,
return -1;
}
- if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
- fd) < 0)
+ if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
return -1;
VIR_FREE(priv->job.current);
@@ -3846,7 +3845,7 @@ qemuDomainScreenshot(virDomainPtr dom,
}
unlink_tmp = true;
- virSecurityManagerSetSavedStateLabel(driver->securityManager, vm->def, tmp);
+ qemuSecuritySetSavedStateLabel(driver->securityManager, vm->def, tmp);
qemuDomainObjEnterMonitor(driver, vm);
if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
@@ -5928,8 +5927,8 @@ static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr secl
* QEMU monitor hasn't seen SIGHUP/ERR on poll().
*/
if (virDomainObjIsActive(vm)) {
- if (virSecurityManagerGetProcessLabel(driver->securityManager,
- vm->def, vm->pid, seclabel) < 0) {
+ if (qemuSecurityGetProcessLabel(driver->securityManager,
+ vm->def, vm->pid, seclabel) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Failed to get security label"));
goto cleanup;
@@ -5973,8 +5972,7 @@ static int qemuDomainGetSecurityLabelList(virDomainPtr dom,
ret = 0;
} else {
int len = 0;
- virSecurityManagerPtr* mgrs = virSecurityManagerGetNested(
- driver->securityManager);
+ virSecurityManagerPtr* mgrs = qemuSecurityGetNested(driver->securityManager);
if (!mgrs)
goto cleanup;
@@ -5990,8 +5988,8 @@ static int qemuDomainGetSecurityLabelList(virDomainPtr dom,
/* Fill the array */
for (i = 0; i < len; i++) {
- if (virSecurityManagerGetProcessLabel(mgrs[i], vm->def, vm->pid,
- &(*seclabels)[i]) < 0) {
+ if (qemuSecurityGetProcessLabel(mgrs[i], vm->def, vm->pid,
+ &(*seclabels)[i]) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Failed to get security label"));
VIR_FREE(mgrs);
@@ -6369,8 +6367,8 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
cleanup:
virCommandFree(cmd);
VIR_FREE(errbuf);
- if (virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
- vm->def, path) < 0)
+ if (qemuSecurityRestoreSavedStateLabel(driver->securityManager,
+ vm->def, path) < 0)
VIR_WARN("failed to restore save state label on %s", path);
virObjectUnref(cfg);
return ret;
@@ -11196,7 +11194,7 @@ qemuDomainMemoryPeek(virDomainPtr dom,
goto endjob;
}
- virSecurityManagerSetSavedStateLabel(driver->securityManager, vm->def, tmp);
+ qemuSecuritySetSavedStateLabel(driver->securityManager, vm->def, tmp);
priv = vm->privateData;
qemuDomainObjEnterMonitor(driver, vm);
@@ -17064,8 +17062,7 @@ qemuDomainOpenGraphics(virDomainPtr dom,
goto endjob;
}
- if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
- fd) < 0)
+ if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
goto endjob;
qemuDomainObjEnterMonitor(driver, vm);
@@ -17129,13 +17126,13 @@ qemuDomainOpenGraphicsFD(virDomainPtr dom,
goto cleanup;
}
- if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
+ if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
if (socketpair(PF_UNIX, SOCK_STREAM, 0, pair) < 0)
goto cleanup;
- if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0)
+ if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 2f209f12b..b99b0e9fb 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -1134,8 +1134,8 @@ qemuDomainAttachNetDevice(virQEMUDriverPtr driver,
}
for (i = 0; i < tapfdSize; i++) {
- if (virSecurityManagerSetTapFDLabel(driver->securityManager,
- vm->def, tapfd[i]) < 0)
+ if (qemuSecuritySetTapFDLabel(driver->securityManager,
+ vm->def, tapfd[i]) < 0)
goto cleanup;
}
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 0f4a6cf21..c40cb1391 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -40,6 +40,7 @@
#include "qemu_cgroup.h"
#include "qemu_hotplug.h"
#include "qemu_blockjob.h"
+#include "qemu_security.h"
#include "domain_audit.h"
#include "virlog.h"
@@ -4597,7 +4598,7 @@ qemuMigrationConnect(virQEMUDriverPtr driver,
spec->destType = MIGRATION_DEST_FD;
spec->dest.fd.qemu = -1;
- if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
+ if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
if (virNetSocketNewConnectTCP(host, port,
AF_UNSPEC,
@@ -4605,7 +4606,7 @@ qemuMigrationConnect(virQEMUDriverPtr driver,
spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
virObjectUnref(sock);
}
- if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
+ if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0 ||
spec->dest.fd.qemu == -1)
goto cleanup;
@@ -5076,8 +5077,8 @@ static int doTunnelMigrate(virQEMUDriverPtr driver,
spec.dest.fd.local = fds[0];
}
if (spec.dest.fd.qemu == -1 ||
- virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
- spec.dest.fd.qemu) < 0) {
+ qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
+ spec.dest.fd.qemu) < 0) {
virReportSystemError(errno, "%s",
_("cannot create pipe for tunnelled migration"));
goto cleanup;
@@ -6463,8 +6464,8 @@ qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
* doesn't have to open() the file, so while we still have to
* grant SELinux access, we can do it on fd and avoid cleanup
* later, as well as skip futzing with cgroup. */
- if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
- compressor ? pipeFD[1] : fd) < 0)
+ if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
+ compressor ? pipeFD[1] : fd) < 0)
goto cleanup;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 92fa69b3c..5c44e565b 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -221,8 +221,7 @@ qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm)
return 0;
}
- if (virSecurityManagerSetDaemonSocketLabel(driver->securityManager,
- vm->def) < 0) {
+ if (qemuSecuritySetDaemonSocketLabel(driver->securityManager, vm->def) < 0) {
VIR_ERROR(_("Failed to set security context for agent for %s"),
vm->def->name);
goto cleanup;
@@ -250,8 +249,7 @@ qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm)
return -1;
}
- if (virSecurityManagerClearSocketLabel(driver->securityManager,
- vm->def) < 0) {
+ if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) {
VIR_ERROR(_("Failed to clear security context for agent for %s"),
vm->def->name);
qemuAgentClose(agent);
@@ -1657,8 +1655,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
int ret = -1;
qemuMonitorPtr mon = NULL;
- if (virSecurityManagerSetDaemonSocketLabel(driver->securityManager,
- vm->def) < 0) {
+ if (qemuSecuritySetDaemonSocketLabel(driver->securityManager, vm->def) < 0) {
VIR_ERROR(_("Failed to set security context for monitor for %s"),
vm->def->name);
return -1;
@@ -1695,7 +1692,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
}
priv->mon = mon;
- if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0) {
+ if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) {
VIR_ERROR(_("Failed to clear security context for monitor for %s"),
vm->def->name);
return -1;
@@ -2638,7 +2635,7 @@ static int qemuProcessHook(void *data)
* protected across fork()
*/
- virSecurityManagerPostFork(h->driver->securityManager);
+ qemuSecurityPostFork(h->driver->securityManager);
/* Some later calls want pid present */
h->vm->pid = getpid();
@@ -2651,7 +2648,7 @@ static int qemuProcessHook(void *data)
* sockets the lock driver opens that we don't want
* labelled. So far we're ok though.
*/
- if (virSecurityManagerSetSocketLabel(h->driver->securityManager, h->vm->def) < 0)
+ if (qemuSecuritySetSocketLabel(h->driver->securityManager, h->vm->def) < 0)
goto cleanup;
if (virDomainLockProcessStart(h->driver->lockManager,
h->cfg->uri,
@@ -2660,7 +2657,7 @@ static int qemuProcessHook(void *data)
true,
&fd) < 0)
goto cleanup;
- if (virSecurityManagerClearSocketLabel(h->driver->securityManager, h->vm->def) < 0)
+ if (qemuSecurityClearSocketLabel(h->driver->securityManager, h->vm->def) < 0)
goto cleanup;
if (qemuDomainBuildNamespace(h->driver, h->vm) < 0)
@@ -3260,8 +3257,8 @@ qemuProcessBuildDestroyHugepagesPath(virQEMUDriverPtr driver,
goto cleanup;
}
- if (virSecurityManagerDomainSetPathLabel(driver->securityManager,
- vm->def, hugepagePath) < 0) {
+ if (qemuSecurityDomainSetPathLabel(driver->securityManager,
+ vm->def, hugepagePath) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Unable to set huge path in security driver"));
goto cleanup;
@@ -3437,13 +3434,13 @@ qemuProcessReconnect(void *opaque)
/* if domain requests security driver we haven't loaded, report error, but
* do not kill the domain
*/
- ignore_value(virSecurityManagerCheckAllLabel(driver->securityManager,
- obj->def));
+ ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
+ obj->def));
if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
goto error;
- if (virSecurityManagerReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
+ if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
goto error;
if (qemuProcessNotifyNets(obj->def) < 0)
@@ -4451,8 +4448,8 @@ qemuProcessMakeDir(virQEMUDriverPtr driver,
goto cleanup;
}
- if (virSecurityManagerDomainSetPathLabel(driver->securityManager,
- vm->def, path) < 0)
+ if (qemuSecurityDomainSetPathLabel(driver->securityManager,
+ vm->def, path) < 0)
goto cleanup;
ret = 0;
@@ -4647,7 +4644,7 @@ qemuProcessStartValidate(virQEMUDriverPtr driver,
}
VIR_DEBUG("Checking domain and device security labels");
- if (virSecurityManagerCheckAllLabel(driver->securityManager, vm->def) < 0)
+ if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
return -1;
}
@@ -5202,7 +5199,7 @@ qemuProcessPrepareDomain(virConnectPtr conn,
/* If you are using a SecurityDriver with dynamic labelling,
then generate a security label for isolation */
VIR_DEBUG("Generating domain security label (if required)");
- if (virSecurityManagerGenLabel(driver->securityManager, vm->def) < 0) {
+ if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0) {
virDomainAuditSecurityLabel(vm, false);
goto cleanup;
}
@@ -5513,8 +5510,8 @@ qemuProcessLaunch(virConnectPtr conn,
virCommandSetUmask(cmd, 0x002);
VIR_DEBUG("Setting up security labelling");
- if (virSecurityManagerSetChildProcessLabel(driver->securityManager,
- vm->def, cmd) < 0)
+ if (qemuSecuritySetChildProcessLabel(driver->securityManager,
+ vm->def, cmd) < 0)
goto cleanup;
virCommandSetOutputFD(cmd, &logfile);
@@ -5524,10 +5521,10 @@ qemuProcessLaunch(virConnectPtr conn,
virCommandDaemonize(cmd);
virCommandRequireHandshake(cmd);
- if (virSecurityManagerPreFork(driver->securityManager) < 0)
+ if (qemuSecurityPreFork(driver->securityManager) < 0)
goto cleanup;
rv = virCommandRun(cmd, NULL);
- virSecurityManagerPostFork(driver->securityManager);
+ qemuSecurityPostFork(driver->securityManager);
/* wait for qemu process to show up */
if (rv == 0) {
@@ -5604,8 +5601,8 @@ qemuProcessLaunch(virConnectPtr conn,
goto cleanup;
}
if (S_ISFIFO(stdin_sb.st_mode) &&
- virSecurityManagerSetImageFDLabel(driver->securityManager,
- vm->def, incoming->fd) < 0)
+ qemuSecuritySetImageFDLabel(driver->securityManager,
+ vm->def, incoming->fd) < 0)
goto cleanup;
}
@@ -6122,7 +6119,7 @@ void qemuProcessStop(virQEMUDriverPtr driver,
qemuSecurityRestoreAllLabel(driver, vm,
!!(flags & VIR_QEMU_PROCESS_STOP_MIGRATED));
- virSecurityManagerReleaseLabel(driver->securityManager, vm->def);
+ qemuSecurityReleaseLabel(driver->securityManager, vm->def);
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDeviceDef dev;
@@ -6366,13 +6363,13 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
vm->pid = pid;
VIR_DEBUG("Detect security driver config");
- sec_managers = virSecurityManagerGetNested(driver->securityManager);
+ sec_managers = qemuSecurityGetNested(driver->securityManager);
if (sec_managers == NULL)
goto error;
for (i = 0; sec_managers[i]; i++) {
seclabelgen = false;
- model = virSecurityManagerGetModel(sec_managers[i]);
+ model = qemuSecurityGetModel(sec_managers[i]);
seclabeldef = virDomainDefGetSecurityLabelDef(vm->def, model);
if (seclabeldef == NULL) {
if (!(seclabeldef = virSecurityLabelDefNew(model)))
@@ -6382,8 +6379,8 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
seclabeldef->type = VIR_DOMAIN_SECLABEL_STATIC;
if (VIR_ALLOC(seclabel) < 0)
goto error;
- if (virSecurityManagerGetProcessLabel(sec_managers[i],
- vm->def, vm->pid, seclabel) < 0)
+ if (qemuSecurityGetProcessLabel(sec_managers[i], vm->def,
+ vm->pid, seclabel) < 0)
goto error;
if (VIR_STRDUP(seclabeldef->model, model) < 0)
@@ -6400,9 +6397,9 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
}
}
- if (virSecurityManagerCheckAllLabel(driver->securityManager, vm->def) < 0)
+ if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
goto error;
- if (virSecurityManagerGenLabel(driver->securityManager, vm->def) < 0)
+ if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0)
goto error;
if (qemuDomainPerfRestart(vm) < 0)
diff --git a/src/qemu/qemu_security.h b/src/qemu/qemu_security.h
index 54638908d..d86db3f6b 100644
--- a/src/qemu/qemu_security.h
+++ b/src/qemu/qemu_security.h
@@ -28,6 +28,7 @@
# include "qemu_conf.h"
# include "domain_conf.h"
+# include "security/security_manager.h"
int qemuSecuritySetAllLabel(virQEMUDriverPtr driver,
virDomainObjPtr vm,
@@ -60,4 +61,35 @@ int qemuSecuritySetHostdevLabel(virQEMUDriverPtr driver,
int qemuSecurityRestoreHostdevLabel(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainHostdevDefPtr hostdev);
+
+/* Please note that for these APIs there is no wrapper yet. Do NOT blindly add
+ * new APIs here. If an API can touch a /dev file add a proper wrapper instead.
+ */
+# define qemuSecurityCheckAllLabel virSecurityManagerCheckAllLabel
+# define qemuSecurityClearSocketLabel virSecurityManagerClearSocketLabel
+# define qemuSecurityDomainSetPathLabel virSecurityManagerDomainSetPathLabel
+# define qemuSecurityGenLabel virSecurityManagerGenLabel
+# define qemuSecurityGetBaseLabel virSecurityManagerGetBaseLabel
+# define qemuSecurityGetDOI virSecurityManagerGetDOI
+# define qemuSecurityGetModel virSecurityManagerGetModel
+# define qemuSecurityGetMountOptions virSecurityManagerGetMountOptions
+# define qemuSecurityGetNested virSecurityManagerGetNested
+# define qemuSecurityGetProcessLabel virSecurityManagerGetProcessLabel
+# define qemuSecurityNew virSecurityManagerNew
+# define qemuSecurityNewDAC virSecurityManagerNewDAC
+# define qemuSecurityNewStack virSecurityManagerNewStack
+# define qemuSecurityPostFork virSecurityManagerPostFork
+# define qemuSecurityPreFork virSecurityManagerPreFork
+# define qemuSecurityReleaseLabel virSecurityManagerReleaseLabel
+# define qemuSecurityReserveLabel virSecurityManagerReserveLabel
+# define qemuSecurityRestoreSavedStateLabel virSecurityManagerRestoreSavedStateLabel
+# define qemuSecuritySetChildProcessLabel virSecurityManagerSetChildProcessLabel
+# define qemuSecuritySetDaemonSocketLabel virSecurityManagerSetDaemonSocketLabel
+# define qemuSecuritySetImageFDLabel virSecurityManagerSetImageFDLabel
+# define qemuSecuritySetSavedStateLabel virSecurityManagerSetSavedStateLabel
+# define qemuSecuritySetSocketLabel virSecurityManagerSetSocketLabel
+# define qemuSecuritySetTapFDLabel virSecurityManagerSetTapFDLabel
+# define qemuSecurityStackAddNested virSecurityManagerStackAddNested
+# define qemuSecurityVerify virSecurityManagerVerify
+
#endif /* __QEMU_SECURITY_H__ */
--
2.11.0
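To make the note added at the end of qemu_security.h above concrete: a
"proper wrapper" for an API that can touch a /dev file would mirror the
PreFork / RunInMountNamespace / PostFork dance already used in
qemuDomainAttachDeviceMknodRecursive in this patch, so that the labelling
happens inside the guest's private /dev rather than on the host's view of
the file. A rough, hypothetical sketch follows; the wrapper, helper and
struct names are made up for illustration and are not the actual libvirt
code (real wrappers of this kind live in qemu_security.c, which is excluded
from the new syntax-check rule and may therefore call the virSecurityManager
API directly).

    /* Hypothetical sketch only -- wrapper, helper and struct names are
     * made up for illustration. */
    struct qemuSecuritySetPathLabelData {
        virQEMUDriverPtr driver;
        virDomainDefPtr def;
        const char *path;
    };

    static int
    qemuSecuritySetPathLabelHelper(pid_t pid ATTRIBUTE_UNUSED,
                                   void *opaque)
    {
        struct qemuSecuritySetPathLabelData *data = opaque;

        /* We are now in a child running inside the domain's mount
         * namespace, so the path resolves to the guest's private /dev. */
        qemuSecurityPostFork(data->driver->securityManager);
        return virSecurityManagerDomainSetPathLabel(data->driver->securityManager,
                                                    data->def, data->path);
    }

    int
    qemuSecurityDomainSetDevPathLabel(virQEMUDriverPtr driver,
                                      virDomainObjPtr vm,
                                      const char *path)
    {
        struct qemuSecuritySetPathLabelData data = {
            .driver = driver, .def = vm->def, .path = path,
        };

        if (qemuSecurityPreFork(driver->securityManager) < 0)
            return -1;

        if (virProcessRunInMountNamespace(vm->pid,
                                          qemuSecuritySetPathLabelHelper,
                                          &data) < 0) {
            qemuSecurityPostFork(driver->securityManager);
            return -1;
        }

        qemuSecurityPostFork(driver->securityManager);
        return 0;
    }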
Addressed comments from v8 -> v9:
Marcelo:
* New public API to query cache usage
Eli:
* Fix core dump when multiple tasks are added.
Addressed comments from v7 -> v8:
Martin:
* Patch subject prefix.
* Move some of the CPU-related information to virhostcpu.c.
* Fix some memory leaks in src/utils/resctrl.c
Martin & Marcelo:
* Don't remove directories which are not maintained by libvirt.
Addressed comments from v6 -> v7:
Marcelo:
* Fix flock usage during VM initialization.
Addressed comments from v5 -> v6:
Marcelo:
* Support other apps operating on /sys/fs/resctrl at the same time.
Libvirt will scan /sys/fs/resctrl again before doing cache allocation;
patch 10 will address this.
Addressed comments from v4 -> v5:
Marcelo:
* Several typos
* Use flock instead of virFileLock
Addressed comments from v3 -> v4:
Daniel & Marcelo:
* Added concurrency support
Addressed comments from v2 -> v3:
Daniel:
* Fixed coding style; passes `make check` and `make syntax-check`.
* Renamed variables and moved them from the header file to the C file.
* For locking/mutex support, no progress yet.
There has been some discussion on the mailing list, but I cannot find a better
way to add locking support without a performance impact.
I'll explain the process below; please advise on what we should do.
VM create:
1) Get the remaining cache on each bank of the host. This should be
shared among all VMs.
2) Calculate the schemata on the bank based on all created resctrl
domains' schemata.
3) Calculate the default schemata by scanning all domains' schemata.
4) Flush the default schemata to /sys/fs/resctrl/schemata.
VM destroy:
1) Remove the resctrl domain of that VM.
2) Recalculate the default schemata.
3) Flush the default schemata to /sys/fs/resctrl/schemata.
The key point is that all VMs share /sys/fs/resctrl/schemata, and
when a VM creates a resctrl domain, the schemata of that VM depends on
the default schemata and all of the other existing schematas, so a global
mutex is required.
Before calculating a schemata or updating the default schemata, libvirt
should acquire this global mutex.
I will try to think more about how to support this gracefully in the next
patch set.
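For readers unfamiliar with the kernel interface this series drives, here is
a rough standalone sketch of the per-VM flow described above: take an
exclusive lock on /sys/fs/resctrl, create a group for the VM, write its
schemata, and add the vCPU thread PIDs to the group's tasks file. This is
my own illustration, not code from this series; the group name, PID and
cache mask are example values only, and the schemata string mirrors the
notation used later in this cover letter.

    /* Illustrative sketch of the resctrl sysfs flow; not the code
     * proposed in this series. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/file.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int
    write_file(const char *path, const char *content)
    {
        FILE *fp = fopen(path, "w");
        if (!fp)
            return -1;
        if (fputs(content, fp) == EOF) {
            fclose(fp);
            return -1;
        }
        return fclose(fp) == 0 ? 0 : -1;
    }

    int
    main(void)
    {
        /* Serialize against other users of /sys/fs/resctrl (other
         * libvirtd instances, other management apps), as discussed
         * above. */
        int lockfd = open("/sys/fs/resctrl", O_RDONLY);
        if (lockfd < 0 || flock(lockfd, LOCK_EX) < 0)
            return 1;

        /* One resctrl group per VM (example group name). */
        if (mkdir("/sys/fs/resctrl/libvirt-vm1", 0755) < 0 && errno != EEXIST)
            return 1;

        /* Example schemata: cache ways on L3 bank 0 reserved for this VM.
         * On a CDP-enabled host this would be L3code:/L3data: lines. */
        if (write_file("/sys/fs/resctrl/libvirt-vm1/schemata",
                       "L3:0=0xff00\n") < 0)
            return 1;

        /* Add each vCPU thread PID (example PID) to the tasks file. */
        if (write_file("/sys/fs/resctrl/libvirt-vm1/tasks", "12345\n") < 0)
            return 1;

        flock(lockfd, LOCK_UN);
        close(lockfd);
        return 0;
    }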
Marcelo:
* Added vcpu support for cachetune; this allows the user to define which
vcpus use which cache allocation bank.
<cachetune id='0' host_id='0' size='3072' unit='KiB' vcpus='0,1'/>
vcpus is a cpumap; the vCPU PIDs will be added to the tasks file.
* Added CDP compatibility; the user can specify l3 cache even if the host enables CDP.
See patch 8.
On a CDP-enabled host, specify l3code/l3data by
<cachetune id='0' host_id='0' type='l3' size='3072' unit='KiB'/>
This will create a schemata like:
L3data:0=0xff00;...
L3code:0=0xff00;...
* Would you please help test whether the functions work?
Martin:
* XML test case: I have not had time to work on this yet; would you please
show me an example? I would like to amend it later.
This patch series adds support for the CAT feature, which is also
called cache tune in libvirt.
It first exposes the cache information which can be tuned in the capabilities XML,
then adds new domain XML element support to add a cache bank which will apply
to this libvirt domain.
The series adds a util file `resctrl.c/h`, an interface to talk with the
Linux kernel's resctrl sysfs.
There is still one TODO left:
1. Expose a new public interface to set cachetune on a live domain.
Some discussion about this feature can be found at:
https://www.redhat.com/archives/libvir-list/2017-January/msg00644.html
Eli Qiao (12):
Resctrl: Add some utils functions
Resctrl: expose cache information to capabilities
Resctrl: Add new xml element to support cache tune
Resctrl: Add private interfaces to operate cache bank
Qemu: Set cache tune while booting a new domain.
Resctrl: enable l3code/l3data
Resctrl: Make sure l3data/l3code are pairs
Resctrl: Compatible mode for cdp enabled
Resctrl: concurrence support
Resctrl: Scan resctrl before doing cache allocation
Resctrl: Add Public API for nodecachestats
Resctrl: Add nodecachestats
daemon/remote.c | 67 +++
docs/schemas/domaincommon.rng | 46 ++
include/libvirt/libvirt-host.h | 32 ++
include/libvirt/virterror.h | 1 +
po/POTFILES.in | 1 +
src/Makefile.am | 1 +
src/conf/capabilities.c | 56 +++
src/conf/capabilities.h | 23 +
src/conf/domain_conf.c | 182 +++++++
src/conf/domain_conf.h | 19 +
src/driver-hypervisor.h | 7 +
src/libvirt-host.c | 41 ++
src/libvirt_private.syms | 12 +
src/libvirt_public.syms | 1 +
src/nodeinfo.c | 64 +++
src/nodeinfo.h | 1 +
src/qemu/qemu_capabilities.c | 8 +
src/qemu/qemu_driver.c | 18 +
src/qemu/qemu_process.c | 54 ++
src/remote/remote_driver.c | 52 ++
src/remote/remote_protocol.x | 25 +-
src/remote_protocol-structs | 16 +
src/util/virerror.c | 1 +
src/util/virhostcpu.c | 186 ++++++-
src/util/virhostcpu.h | 6 +
src/util/virresctrl.c | 1082 ++++++++++++++++++++++++++++++++++++++++
src/util/virresctrl.h | 96 ++++
tools/virsh-host.c | 49 ++
28 files changed, 2129 insertions(+), 18 deletions(-)
create mode 100644 src/util/virresctrl.c
create mode 100644 src/util/virresctrl.h
--
1.9.1
Note: if you want to try this out, you'll need to make sure
your QEMU binary includes this commit[1]; moreover,
that commit is missing a way for libvirt to detect
whether the new naming scheme is in place, so this
will have to remain an RFC until the QEMU side has
been sorted out.
Patches 1-3 are just setting the stage.
Patch 4 starts actually introducing the feature, by
relaxing some checks that can no longer be as strict.
Patches 5-10 put all the boring bits (XML parsing and
formatting, QEMU capabilities) in place.
Patch 11 enables the feature at last.
Patch 12 introduces a single test; a bunch more will be
added before posting this for real (not as RFC).
[1] https://github.com/dgibson/qemu/commit/0a6a9ba2adc48a9a5ea7406d1a5fb3c36f00…
Andrea Bolognani (12):
qemu: Allow qemuBuildControllerDevStr() to return NULL
qemu: Tweak index number checking
conf: Move index number checking to drivers
qemu: Relax pci-root index requirement for pSeries guests
schema: Allow <target index='...'/>
schema: Add 'spapr-pci-host-bridge' controller model
conf: Parse and format <target index='...'/>
conf: Add 'spapr-pci-host-bridge' controller model
qemu: Automatically pick index and model for pci-root controllers
qemu: Introduce QEMU_CAPS_DEVICE_SPAPR_PCI_HOST_BRIDGE
qemu: Use multiple PHBs for pSeries guests
tests: Add tests for pSeries guests with multiple PHBs
docs/schemas/domaincommon.rng | 7 ++
src/bhyve/bhyve_domain.c | 15 +++
src/conf/domain_conf.c | 34 ++++--
src/conf/domain_conf.h | 2 +
src/libxl/libxl_domain.c | 14 +++
src/lxc/lxc_domain.c | 14 +++
src/openvz/openvz_driver.c | 14 +++
src/qemu/qemu_capabilities.c | 2 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_command.c | 126 +++++++++++++++++----
src/qemu/qemu_command.h | 9 +-
src/qemu/qemu_domain.c | 13 +++
src/qemu/qemu_domain_address.c | 47 +++++++-
src/qemu/qemu_hotplug.c | 5 +-
src/uml/uml_driver.c | 14 +++
src/vz/vz_driver.c | 14 +++
src/xen/xen_driver.c | 14 +++
.../qemuargv2xmldata/qemuargv2xml-pseries-disk.xml | 5 +-
.../qemuargv2xml-pseries-nvram.xml | 5 +-
tests/qemucapabilitiesdata/caps_2.6.0.ppc64le.xml | 1 +
.../qemuxml2argv-pseries-phb-simple.args | 26 +++++
.../qemuxml2argv-pseries-phb-simple.xml | 20 ++++
tests/qemuxml2argvtest.c | 5 +
.../qemuxml2xmlout-panic-pseries.xml | 5 +-
.../qemuxml2xmlout-ppc64-usb-controller-legacy.xml | 5 +-
.../qemuxml2xmlout-ppc64-usb-controller.xml | 5 +-
.../qemuxml2xmlout-pseries-nvram.xml | 5 +-
.../qemuxml2xmlout-pseries-panic-missing.xml | 5 +-
.../qemuxml2xmlout-pseries-panic-no-address.xml | 5 +-
...g.xml => qemuxml2xmlout-pseries-phb-simple.xml} | 13 ++-
tests/qemuxml2xmltest.c | 4 +
31 files changed, 407 insertions(+), 47 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-pseries-phb-simple.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-pseries-phb-simple.xml
copy tests/qemuxml2xmloutdata/{qemuxml2xmlout-pseries-panic-missing.xml => qemuxml2xmlout-pseries-phb-simple.xml} (69%)
--
2.7.4
The "hotplugged" property is user visible, but it was never meant
to be set by the user. There are probably multiple ways to break
or crash device code by overriding the property. For example, we
recently fixed a crash in rtc_set_memory() related to the
property (commit 26ef65beab852caf2b1ef4976e3473f2d525164d).
There has been some discussion about making management software
use "hotplugged=on" on migration, to indicate devices that were
hotplugged in the migration source. There were other suggestions
to address this, like including the "hotplugged" field in the
migration stream instead of requiring it to be set properly.
Whatever solution we choose in the future, this patch disables
setting "hotplugged" explicitly in the command-line by now,
because the ability to set the property is unused, untested, and
undocumented.
Signed-off-by: Eduardo Habkost <ehabkost(a)redhat.com>
---
hw/core/qdev.c | 9 +--------
1 file changed, 1 insertion(+), 8 deletions(-)
diff --git a/hw/core/qdev.c b/hw/core/qdev.c
index 06ba02e2a3..800c9ca23f 100644
--- a/hw/core/qdev.c
+++ b/hw/core/qdev.c
@@ -1016,13 +1016,6 @@ static bool device_get_hotplugged(Object *obj, Error **err)
return dev->hotplugged;
}
-static void device_set_hotplugged(Object *obj, bool value, Error **err)
-{
- DeviceState *dev = DEVICE(obj);
-
- dev->hotplugged = value;
-}
-
static void device_initfn(Object *obj)
{
DeviceState *dev = DEVICE(obj);
@@ -1042,7 +1035,7 @@ static void device_initfn(Object *obj)
object_property_add_bool(obj, "hotpluggable",
device_get_hotpluggable, NULL, NULL);
object_property_add_bool(obj, "hotplugged",
- device_get_hotplugged, device_set_hotplugged,
+ device_get_hotplugged, NULL,
&error_abort);
class = object_get_class(OBJECT(dev));
--
2.11.0.259.g40922b1
This series applies on top of "qemu: Detect host CPU model by asking
QEMU on x86_64".
https://bugzilla.redhat.com/show_bug.cgi?id=1406791
Jiri Denemark (2):
cpu_x86: Disable TSX on broken models
cputest: Add CPUID data for Haswell with TSX
src/cpu/cpu_x86.c | 72 ++++++++++++++++++++--
tests/cputest.c | 1 +
.../x86_64-cpuid-Core-i5-4670T-guest.xml | 2 +-
.../x86_64-cpuid-Core-i5-4670T-host.xml | 2 +-
.../x86_64-cpuid-Core-i5-4670T-json.xml | 2 +-
.../x86_64-cpuid-Xeon-E7-8890-guest.xml | 32 ++++++++++
.../cputestdata/x86_64-cpuid-Xeon-E7-8890-host.xml | 32 ++++++++++
tests/cputestdata/x86_64-cpuid-Xeon-E7-8890.xml | 37 +++++++++++
8 files changed, 172 insertions(+), 8 deletions(-)
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E7-8890-guest.xml
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E7-8890-host.xml
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E7-8890.xml
--
2.11.1
[libvirt] [PATCH v3 00/28] qemu: Detect host CPU model by asking QEMU on x86_64
by Jiri Denemark 03 Mar '17
Until now the host-model CPU mode tried to enable all CPU features supported
by the host CPU even if QEMU/KVM did not support them. This caused a
number of issues and made host-model quite unreliable. Asking QEMU what
CPU it can provide on the current host makes host-model much more
robust.
This series fixes the following bugs:
https://bugzilla.redhat.com/show_bug.cgi?id=1018251
https://bugzilla.redhat.com/show_bug.cgi?id=1371617
https://bugzilla.redhat.com/show_bug.cgi?id=1372581
https://bugzilla.redhat.com/show_bug.cgi?id=1404627
https://bugzilla.redhat.com/show_bug.cgi?id=870071
In addition to that, the following bug should be mostly limited to cases
when an unsupported feature is explicitly requested:
https://bugzilla.redhat.com/show_bug.cgi?id=1335534
The series relies on features which are not in QEMU yet, but they should
hopefully be close enough to be pushed into 2.9.0. In the meantime, Eduardo's
work/x86-query-cpu-expansion-full branch can be used to play with them.
Version 3:
- a few patches which were ACKed in v2 and didn't have any dependencies
were pushed to make the series a bit smaller
- review comments addressed (see individual patches for more details)
Version 2:
- properly set vendor property in converted test data files
- fix cpu-parse.sh to use "x86_64" prefix for the generated files
Jiri Denemark (28):
qemucapstest: Update test data for QEMU 2.9.0
domaincapstest: Add test data for QEMU 2.9.0
qemu: Refactor virQEMUCapsInitHostCPUModel
qemu: Fix CPU model fallback in domain capabilities
docs: Update description of the host-model CPU mode
qemu: Rename hostCPU/feature element in capabilities cache
qemu: Prepare for more types in qemuMonitorCPUModelInfo
qemu: Store more types in qemuMonitorCPUModelInfo
qemu: Probe "max" CPU model in TCG
cpu_x86: Drop virCPUx86MakeData and use virCPUDataNew
cpu_x86: Make virCPUx86DataClear static
cpu: Rework cpuDataFree
cpu_x86: Make virCPUx86DataAddCPUID work with virCPUDataPtr
cpu_x86: Introduce virCPUx86DataSetSignature
cpu_x86: Introduce virCPUx86DataSetVendor
cpu_x86: Introduce virCPUx86DataAddFeature
qemu: Get host CPU model from QEMU on x86_64
qemu: Use enum for CPU model expansion type
qemu: Use full CPU model expansion on x86
qemu: Make virQEMUCapsInitCPUModel testable
cputest: Rename x86 data files
cputest: Use virArch enum rather then strings
cputest: Switch host CPU data scripts to model expansion
cputest: Convert all json data files to query-cpu-model-expansion
cputest: Test virQEMUCapsInitCPUModel
cputest: Drop obsolete CPU test data files
cputest: Drop .new suffix from CPU test data files
news: Detect host CPU model by asking QEMU on x86_64
docs/formatdomain.html.in | 37 +-
docs/news.xml | 11 +
src/bhyve/bhyve_capabilities.c | 2 +-
src/cpu/cpu.c | 21 +-
src/cpu/cpu.h | 4 +-
src/cpu/cpu_arm.c | 7 -
src/cpu/cpu_ppc64.c | 6 +-
src/cpu/cpu_s390.c | 7 -
src/cpu/cpu_x86.c | 280 ++++---
src/cpu/cpu_x86.h | 13 +-
src/libvirt_private.syms | 7 +-
src/libxl/libxl_capabilities.c | 18 +-
src/qemu/qemu_capabilities.c | 463 ++++++++---
src/qemu/qemu_capabilities.h | 3 +-
src/qemu/qemu_capspriv.h | 13 +-
src/qemu/qemu_command.c | 2 +-
src/qemu/qemu_monitor.c | 29 +-
src/qemu/qemu_monitor.h | 35 +-
src/qemu/qemu_monitor_json.c | 107 ++-
src/qemu/qemu_monitor_json.h | 4 +-
src/qemu/qemu_parse_command.c | 2 +-
src/qemu/qemu_process.c | 7 +-
src/vmware/vmware_conf.c | 2 +-
src/vz/vz_driver.c | 2 +-
tests/cputest.c | 324 ++++----
tests/cputestdata/cpu-convert.py | 249 ++++++
tests/cputestdata/cpu-gather.sh | 39 +-
tests/cputestdata/cpu-parse.sh | 5 +-
tests/cputestdata/x86-cpuid-A10-5800K.json | 77 --
tests/cputestdata/x86-cpuid-Core-i5-2500.json | 88 ---
tests/cputestdata/x86-cpuid-Core-i5-2540M.json | 82 --
tests/cputestdata/x86-cpuid-Core-i5-4670T.json | 77 --
tests/cputestdata/x86-cpuid-Core-i5-6600.json | 82 --
tests/cputestdata/x86-cpuid-Core-i7-2600.json | 77 --
tests/cputestdata/x86-cpuid-Core-i7-3740QM.json | 77 --
tests/cputestdata/x86-cpuid-Core-i7-3770.json | 77 --
tests/cputestdata/x86-cpuid-Core-i7-4600U.json | 82 --
tests/cputestdata/x86-cpuid-Core-i7-5600U-json.xml | 12 -
tests/cputestdata/x86-cpuid-Core-i7-5600U.json | 88 ---
tests/cputestdata/x86-cpuid-Core2-E6850.json | 77 --
tests/cputestdata/x86-cpuid-Opteron-2350.json | 71 --
tests/cputestdata/x86-cpuid-Opteron-6234.json | 88 ---
tests/cputestdata/x86-cpuid-Phenom-B95.json | 77 --
tests/cputestdata/x86-cpuid-Xeon-E3-1245.json | 88 ---
tests/cputestdata/x86-cpuid-Xeon-E5-2630.json | 77 --
tests/cputestdata/x86-cpuid-Xeon-E5-2650.json | 71 --
tests/cputestdata/x86-cpuid-Xeon-E7-4820.json | 77 --
tests/cputestdata/x86-cpuid-Xeon-W3520.json | 77 --
...ack.xml => x86_64-Haswell-noTSX-nofallback.xml} | 0
...-Haswell-noTSX.xml => x86_64-Haswell-noTSX.xml} | 0
.../{x86-Haswell.xml => x86_64-Haswell.xml} | 0
...e-1-result.xml => x86_64-baseline-1-result.xml} | 0
.../{x86-baseline-1.xml => x86_64-baseline-1.xml} | 0
...e-2-result.xml => x86_64-baseline-2-result.xml} | 0
.../{x86-baseline-2.xml => x86_64-baseline-2.xml} | 0
...expanded.xml => x86_64-baseline-3-expanded.xml} | 0
...e-3-result.xml => x86_64-baseline-3-result.xml} | 0
.../{x86-baseline-3.xml => x86_64-baseline-3.xml} | 0
...expanded.xml => x86_64-baseline-4-expanded.xml} | 0
...e-4-result.xml => x86_64-baseline-4-result.xml} | 0
.../{x86-baseline-4.xml => x86_64-baseline-4.xml} | 0
...expanded.xml => x86_64-baseline-5-expanded.xml} | 0
...e-5-result.xml => x86_64-baseline-5-result.xml} | 0
.../{x86-baseline-5.xml => x86_64-baseline-5.xml} | 0
...atable.xml => x86_64-baseline-6-migratable.xml} | 0
...e-6-result.xml => x86_64-baseline-6-result.xml} | 0
.../{x86-baseline-6.xml => x86_64-baseline-6.xml} | 0
...e-7-result.xml => x86_64-baseline-7-result.xml} | 0
.../{x86-baseline-7.xml => x86_64-baseline-7.xml} | 0
...e-8-result.xml => x86_64-baseline-8-result.xml} | 0
.../{x86-baseline-8.xml => x86_64-baseline-8.xml} | 0
...ml => x86_64-baseline-incompatible-vendors.xml} | 0
...lt.xml => x86_64-baseline-no-vendor-result.xml} | 0
...no-vendor.xml => x86_64-baseline-no-vendor.xml} | 0
...xml => x86_64-baseline-some-vendors-result.xml} | 0
...endors.xml => x86_64-baseline-some-vendors.xml} | 0
...-bogus-feature.xml => x86_64-bogus-feature.xml} | 0
...{x86-bogus-model.xml => x86_64-bogus-model.xml} | 0
...86-bogus-vendor.xml => x86_64-bogus-vendor.xml} | 0
...-guest.xml => x86_64-cpuid-A10-5800K-guest.xml} | 0
...0K-host.xml => x86_64-cpuid-A10-5800K-host.xml} | 0
...0K-json.xml => x86_64-cpuid-A10-5800K-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-A10-5800K.json | 203 +++++
...id-A10-5800K.xml => x86_64-cpuid-A10-5800K.xml} | 0
...-guest.xml => x86_64-cpuid-Atom-D510-guest.xml} | 0
...10-host.xml => x86_64-cpuid-Atom-D510-host.xml} | 0
...id-Atom-D510.xml => x86_64-cpuid-Atom-D510.xml} | 0
...-guest.xml => x86_64-cpuid-Atom-N450-guest.xml} | 0
...50-host.xml => x86_64-cpuid-Atom-N450-host.xml} | 0
...id-Atom-N450.xml => x86_64-cpuid-Atom-N450.xml} | 0
...est.xml => x86_64-cpuid-Core-i5-2500-guest.xml} | 0
...host.xml => x86_64-cpuid-Core-i5-2500-host.xml} | 0
...json.xml => x86_64-cpuid-Core-i5-2500-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i5-2500.json | 203 +++++
...e-i5-2500.xml => x86_64-cpuid-Core-i5-2500.xml} | 0
...st.xml => x86_64-cpuid-Core-i5-2540M-guest.xml} | 0
...ost.xml => x86_64-cpuid-Core-i5-2540M-host.xml} | 0
...son.xml => x86_64-cpuid-Core-i5-2540M-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i5-2540M.json | 203 +++++
...i5-2540M.xml => x86_64-cpuid-Core-i5-2540M.xml} | 0
...st.xml => x86_64-cpuid-Core-i5-4670T-guest.xml} | 0
...ost.xml => x86_64-cpuid-Core-i5-4670T-host.xml} | 0
...son.xml => x86_64-cpuid-Core-i5-4670T-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i5-4670T.json | 203 +++++
...i5-4670T.xml => x86_64-cpuid-Core-i5-4670T.xml} | 0
...est.xml => x86_64-cpuid-Core-i5-6600-guest.xml} | 0
...host.xml => x86_64-cpuid-Core-i5-6600-host.xml} | 0
...json.xml => x86_64-cpuid-Core-i5-6600-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i5-6600.json | 203 +++++
...e-i5-6600.xml => x86_64-cpuid-Core-i5-6600.xml} | 0
...est.xml => x86_64-cpuid-Core-i7-2600-guest.xml} | 0
...host.xml => x86_64-cpuid-Core-i7-2600-host.xml} | 0
...json.xml => x86_64-cpuid-Core-i7-2600-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i7-2600.json | 203 +++++
...e-i7-2600.xml => x86_64-cpuid-Core-i7-2600.xml} | 0
...st.xml => x86_64-cpuid-Core-i7-3520M-guest.xml} | 0
...ost.xml => x86_64-cpuid-Core-i7-3520M-host.xml} | 0
...i7-3520M.xml => x86_64-cpuid-Core-i7-3520M.xml} | 0
...t.xml => x86_64-cpuid-Core-i7-3740QM-guest.xml} | 0
...st.xml => x86_64-cpuid-Core-i7-3740QM-host.xml} | 0
...on.xml => x86_64-cpuid-Core-i7-3740QM-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i7-3740QM.json | 203 +++++
...-3740QM.xml => x86_64-cpuid-Core-i7-3740QM.xml} | 0
...est.xml => x86_64-cpuid-Core-i7-3770-guest.xml} | 0
...host.xml => x86_64-cpuid-Core-i7-3770-host.xml} | 0
...json.xml => x86_64-cpuid-Core-i7-3770-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i7-3770.json | 203 +++++
...e-i7-3770.xml => x86_64-cpuid-Core-i7-3770.xml} | 0
...st.xml => x86_64-cpuid-Core-i7-4600U-guest.xml} | 0
...ost.xml => x86_64-cpuid-Core-i7-4600U-host.xml} | 0
...son.xml => x86_64-cpuid-Core-i7-4600U-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Core-i7-4600U.json | 203 +++++
...i7-4600U.xml => x86_64-cpuid-Core-i7-4600U.xml} | 0
...st.xml => x86_64-cpuid-Core-i7-5600U-guest.xml} | 0
...ost.xml => x86_64-cpuid-Core-i7-5600U-host.xml} | 0
.../x86_64-cpuid-Core-i7-5600U-json.xml | 16 +
tests/cputestdata/x86_64-cpuid-Core-i7-5600U.json | 203 +++++
...i7-5600U.xml => x86_64-cpuid-Core-i7-5600U.xml} | 0
...uest.xml => x86_64-cpuid-Core2-E6850-guest.xml} | 0
...-host.xml => x86_64-cpuid-Core2-E6850-host.xml} | 0
...-json.xml => x86_64-cpuid-Core2-E6850-json.xml} | 5 +-
tests/cputestdata/x86_64-cpuid-Core2-E6850.json | 203 +++++
...ore2-E6850.xml => x86_64-cpuid-Core2-E6850.xml} | 0
...uest.xml => x86_64-cpuid-Core2-Q9500-guest.xml} | 0
...-host.xml => x86_64-cpuid-Core2-Q9500-host.xml} | 0
...ore2-Q9500.xml => x86_64-cpuid-Core2-Q9500.xml} | 0
...50-guest.xml => x86_64-cpuid-FX-8150-guest.xml} | 0
...8150-host.xml => x86_64-cpuid-FX-8150-host.xml} | 0
...-cpuid-FX-8150.xml => x86_64-cpuid-FX-8150.xml} | 0
...est.xml => x86_64-cpuid-Opteron-1352-guest.xml} | 0
...host.xml => x86_64-cpuid-Opteron-1352-host.xml} | 0
...eron-1352.xml => x86_64-cpuid-Opteron-1352.xml} | 0
...est.xml => x86_64-cpuid-Opteron-2350-guest.xml} | 0
...host.xml => x86_64-cpuid-Opteron-2350-host.xml} | 0
...json.xml => x86_64-cpuid-Opteron-2350-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Opteron-2350.json | 203 +++++
...eron-2350.xml => x86_64-cpuid-Opteron-2350.xml} | 0
...est.xml => x86_64-cpuid-Opteron-6234-guest.xml} | 0
...host.xml => x86_64-cpuid-Opteron-6234-host.xml} | 0
...json.xml => x86_64-cpuid-Opteron-6234-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Opteron-6234.json | 203 +++++
...eron-6234.xml => x86_64-cpuid-Opteron-6234.xml} | 0
...est.xml => x86_64-cpuid-Opteron-6282-guest.xml} | 0
...host.xml => x86_64-cpuid-Opteron-6282-host.xml} | 0
...eron-6282.xml => x86_64-cpuid-Opteron-6282.xml} | 0
...st.xml => x86_64-cpuid-Pentium-P6100-guest.xml} | 0
...ost.xml => x86_64-cpuid-Pentium-P6100-host.xml} | 0
...um-P6100.xml => x86_64-cpuid-Pentium-P6100.xml} | 0
...guest.xml => x86_64-cpuid-Phenom-B95-guest.xml} | 0
...5-host.xml => x86_64-cpuid-Phenom-B95-host.xml} | 0
...5-json.xml => x86_64-cpuid-Phenom-B95-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Phenom-B95.json | 203 +++++
...-Phenom-B95.xml => x86_64-cpuid-Phenom-B95.xml} | 0
...-guest.xml => x86_64-cpuid-Xeon-5110-guest.xml} | 0
...10-host.xml => x86_64-cpuid-Xeon-5110-host.xml} | 0
...id-Xeon-5110.xml => x86_64-cpuid-Xeon-5110.xml} | 0
...est.xml => x86_64-cpuid-Xeon-E3-1245-guest.xml} | 0
...host.xml => x86_64-cpuid-Xeon-E3-1245-host.xml} | 0
...json.xml => x86_64-cpuid-Xeon-E3-1245-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Xeon-E3-1245.json | 203 +++++
...n-E3-1245.xml => x86_64-cpuid-Xeon-E3-1245.xml} | 0
...est.xml => x86_64-cpuid-Xeon-E5-2630-guest.xml} | 0
...host.xml => x86_64-cpuid-Xeon-E5-2630-host.xml} | 0
...json.xml => x86_64-cpuid-Xeon-E5-2630-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Xeon-E5-2630.json | 203 +++++
...n-E5-2630.xml => x86_64-cpuid-Xeon-E5-2630.xml} | 0
...est.xml => x86_64-cpuid-Xeon-E5-2650-guest.xml} | 0
...host.xml => x86_64-cpuid-Xeon-E5-2650-host.xml} | 0
...json.xml => x86_64-cpuid-Xeon-E5-2650-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Xeon-E5-2650.json | 203 +++++
...n-E5-2650.xml => x86_64-cpuid-Xeon-E5-2650.xml} | 0
...est.xml => x86_64-cpuid-Xeon-E7-4820-guest.xml} | 0
...host.xml => x86_64-cpuid-Xeon-E7-4820-host.xml} | 0
...json.xml => x86_64-cpuid-Xeon-E7-4820-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Xeon-E7-4820.json | 203 +++++
...n-E7-4820.xml => x86_64-cpuid-Xeon-E7-4820.xml} | 0
...guest.xml => x86_64-cpuid-Xeon-W3520-guest.xml} | 0
...0-host.xml => x86_64-cpuid-Xeon-W3520-host.xml} | 0
...0-json.xml => x86_64-cpuid-Xeon-W3520-json.xml} | 1 +
tests/cputestdata/x86_64-cpuid-Xeon-W3520.json | 203 +++++
...-Xeon-W3520.xml => x86_64-cpuid-Xeon-W3520.xml} | 0
...guest.xml => x86_64-cpuid-Xeon-X5460-guest.xml} | 0
...0-host.xml => x86_64-cpuid-Xeon-X5460-host.xml} | 0
...-Xeon-X5460.xml => x86_64-cpuid-Xeon-X5460.xml} | 0
...le-extra.xml => x86_64-exact-disable-extra.xml} | 0
...-exact-disable.xml => x86_64-exact-disable.xml} | 0
...xact-disable2.xml => x86_64-exact-disable2.xml} | 0
...bid-extra.xml => x86_64-exact-forbid-extra.xml} | 0
...86-exact-forbid.xml => x86_64-exact-forbid.xml} | 0
...-Haswell.xml => x86_64-exact-force-Haswell.xml} | 0
...{x86-exact-force.xml => x86_64-exact-force.xml} | 0
...re-extra.xml => x86_64-exact-require-extra.xml} | 0
...-exact-require.xml => x86_64-exact-require.xml} | 0
.../{x86-exact.xml => x86_64-exact.xml} | 0
...-nofallback.xml => x86_64-guest-nofallback.xml} | 0
.../{x86-guest.xml => x86_64-guest.xml} | 0
...t.xml => x86_64-host+guest,model486-result.xml} | 0
...ult.xml => x86_64-host+guest,models-result.xml} | 0
...est-result.xml => x86_64-host+guest-result.xml} | 0
.../{x86-host+guest.xml => x86_64-host+guest.xml} | 0
... x86_64-host+host+host-model,models-result.xml} | 0
...k.xml => x86_64-host+host-model-nofallback.xml} | 0
...t+host-model.xml => x86_64-host+host-model.xml} | 0
...l => x86_64-host+host-passthrough-features.xml} | 0
...hrough.xml => x86_64-host+host-passthrough.xml} | 0
.../{x86-host+min.xml => x86_64-host+min.xml} | 0
...ult.xml => x86_64-host+penryn-force-result.xml} | 0
...-host+pentium3.xml => x86_64-host+pentium3.xml} | 0
...l => x86_64-host+strict-force-extra-result.xml} | 0
...-host-Haswell-noTSX+Haswell,haswell-result.xml} | 0
...Haswell-noTSX+Haswell-noTSX,haswell-result.xml} | 0
...64-host-Haswell-noTSX+Haswell-noTSX-result.xml} | 0
...ell-noTSX.xml => x86_64-host-Haswell-noTSX.xml} | 0
...SandyBridge.xml => x86_64-host-SandyBridge.xml} | 0
...-host-amd-fake.xml => x86_64-host-amd-fake.xml} | 0
.../{x86-host-amd.xml => x86_64-host-amd.xml} | 0
....xml => x86_64-host-better+pentium3-result.xml} | 0
...{x86-host-better.xml => x86_64-host-better.xml} | 0
...incomp-arch.xml => x86_64-host-incomp-arch.xml} | 0
...model.xml => x86_64-host-invtsc+host-model.xml} | 0
...{x86-host-invtsc.xml => x86_64-host-invtsc.xml} | 0
...llback.xml => x86_64-host-model-nofallback.xml} | 0
.../{x86-host-model.xml => x86_64-host-model.xml} | 0
...ost-no-vendor.xml => x86_64-host-no-vendor.xml} | 0
...es.xml => x86_64-host-passthrough-features.xml} | 0
...passthrough.xml => x86_64-host-passthrough.xml} | 0
...sult.xml => x86_64-host-worse+guest-result.xml} | 0
.../{x86-host-worse.xml => x86_64-host-worse.xml} | 0
.../cputestdata/{x86-host.xml => x86_64-host.xml} | 0
tests/cputestdata/{x86-min.xml => x86_64-min.xml} | 0
...86-penryn-force.xml => x86_64-penryn-force.xml} | 0
...86-pentium3-amd.xml => x86_64-pentium3-amd.xml} | 0
.../{x86-pentium3.xml => x86_64-pentium3.xml} | 0
...trict-disable.xml => x86_64-strict-disable.xml} | 0
...rce-extra.xml => x86_64-strict-force-extra.xml} | 0
...{x86-strict-full.xml => x86_64-strict-full.xml} | 0
.../{x86-strict.xml => x86_64-strict.xml} | 0
tests/domaincapsschemadata/qemu_2.8.0.s390x.xml | 2 +-
.../domaincapsschemadata/qemu_2.9.0-tcg.x86_64.xml | 145 ++++
tests/domaincapsschemadata/qemu_2.9.0.x86_64.xml | 124 +++
tests/domaincapstest.c | 8 +
.../qemucapabilitiesdata/caps_2.8.0.s390x.replies | 8 +
tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml | 32 +-
.../qemucapabilitiesdata/caps_2.9.0.x86_64.replies | 879 ++++++++++++++++++++-
tests/qemucapabilitiesdata/caps_2.9.0.x86_64.xml | 465 ++++++++++-
tests/qemumonitorjsontest.c | 4 +-
tests/qemuxml2argvtest.c | 3 +-
267 files changed, 6731 insertions(+), 2055 deletions(-)
create mode 100755 tests/cputestdata/cpu-convert.py
delete mode 100644 tests/cputestdata/x86-cpuid-A10-5800K.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i5-2500.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i5-2540M.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i5-4670T.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i5-6600.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i7-2600.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i7-3740QM.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i7-3770.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i7-4600U.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i7-5600U-json.xml
delete mode 100644 tests/cputestdata/x86-cpuid-Core-i7-5600U.json
delete mode 100644 tests/cputestdata/x86-cpuid-Core2-E6850.json
delete mode 100644 tests/cputestdata/x86-cpuid-Opteron-2350.json
delete mode 100644 tests/cputestdata/x86-cpuid-Opteron-6234.json
delete mode 100644 tests/cputestdata/x86-cpuid-Phenom-B95.json
delete mode 100644 tests/cputestdata/x86-cpuid-Xeon-E3-1245.json
delete mode 100644 tests/cputestdata/x86-cpuid-Xeon-E5-2630.json
delete mode 100644 tests/cputestdata/x86-cpuid-Xeon-E5-2650.json
delete mode 100644 tests/cputestdata/x86-cpuid-Xeon-E7-4820.json
delete mode 100644 tests/cputestdata/x86-cpuid-Xeon-W3520.json
rename tests/cputestdata/{x86-Haswell-noTSX-nofallback.xml => x86_64-Haswell-noTSX-nofallback.xml} (100%)
rename tests/cputestdata/{x86-Haswell-noTSX.xml => x86_64-Haswell-noTSX.xml} (100%)
rename tests/cputestdata/{x86-Haswell.xml => x86_64-Haswell.xml} (100%)
rename tests/cputestdata/{x86-baseline-1-result.xml => x86_64-baseline-1-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-1.xml => x86_64-baseline-1.xml} (100%)
rename tests/cputestdata/{x86-baseline-2-result.xml => x86_64-baseline-2-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-2.xml => x86_64-baseline-2.xml} (100%)
rename tests/cputestdata/{x86-baseline-3-expanded.xml => x86_64-baseline-3-expanded.xml} (100%)
rename tests/cputestdata/{x86-baseline-3-result.xml => x86_64-baseline-3-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-3.xml => x86_64-baseline-3.xml} (100%)
rename tests/cputestdata/{x86-baseline-4-expanded.xml => x86_64-baseline-4-expanded.xml} (100%)
rename tests/cputestdata/{x86-baseline-4-result.xml => x86_64-baseline-4-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-4.xml => x86_64-baseline-4.xml} (100%)
rename tests/cputestdata/{x86-baseline-5-expanded.xml => x86_64-baseline-5-expanded.xml} (100%)
rename tests/cputestdata/{x86-baseline-5-result.xml => x86_64-baseline-5-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-5.xml => x86_64-baseline-5.xml} (100%)
rename tests/cputestdata/{x86-baseline-6-migratable.xml => x86_64-baseline-6-migratable.xml} (100%)
rename tests/cputestdata/{x86-baseline-6-result.xml => x86_64-baseline-6-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-6.xml => x86_64-baseline-6.xml} (100%)
rename tests/cputestdata/{x86-baseline-7-result.xml => x86_64-baseline-7-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-7.xml => x86_64-baseline-7.xml} (100%)
rename tests/cputestdata/{x86-baseline-8-result.xml => x86_64-baseline-8-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-8.xml => x86_64-baseline-8.xml} (100%)
rename tests/cputestdata/{x86-baseline-incompatible-vendors.xml => x86_64-baseline-incompatible-vendors.xml} (100%)
rename tests/cputestdata/{x86-baseline-no-vendor-result.xml => x86_64-baseline-no-vendor-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-no-vendor.xml => x86_64-baseline-no-vendor.xml} (100%)
rename tests/cputestdata/{x86-baseline-some-vendors-result.xml => x86_64-baseline-some-vendors-result.xml} (100%)
rename tests/cputestdata/{x86-baseline-some-vendors.xml => x86_64-baseline-some-vendors.xml} (100%)
rename tests/cputestdata/{x86-bogus-feature.xml => x86_64-bogus-feature.xml} (100%)
rename tests/cputestdata/{x86-bogus-model.xml => x86_64-bogus-model.xml} (100%)
rename tests/cputestdata/{x86-bogus-vendor.xml => x86_64-bogus-vendor.xml} (100%)
rename tests/cputestdata/{x86-cpuid-A10-5800K-guest.xml => x86_64-cpuid-A10-5800K-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-A10-5800K-host.xml => x86_64-cpuid-A10-5800K-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-A10-5800K-json.xml => x86_64-cpuid-A10-5800K-json.xml} (96%)
create mode 100644 tests/cputestdata/x86_64-cpuid-A10-5800K.json
rename tests/cputestdata/{x86-cpuid-A10-5800K.xml => x86_64-cpuid-A10-5800K.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Atom-D510-guest.xml => x86_64-cpuid-Atom-D510-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Atom-D510-host.xml => x86_64-cpuid-Atom-D510-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Atom-D510.xml => x86_64-cpuid-Atom-D510.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Atom-N450-guest.xml => x86_64-cpuid-Atom-N450-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Atom-N450-host.xml => x86_64-cpuid-Atom-N450-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Atom-N450.xml => x86_64-cpuid-Atom-N450.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-2500-guest.xml => x86_64-cpuid-Core-i5-2500-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-2500-host.xml => x86_64-cpuid-Core-i5-2500-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-2540M-json.xml => x86_64-cpuid-Core-i5-2500-json.xml} (94%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i5-2500.json
rename tests/cputestdata/{x86-cpuid-Core-i5-2500.xml => x86_64-cpuid-Core-i5-2500.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-2540M-guest.xml => x86_64-cpuid-Core-i5-2540M-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-2540M-host.xml => x86_64-cpuid-Core-i5-2540M-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-2500-json.xml => x86_64-cpuid-Core-i5-2540M-json.xml} (94%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i5-2540M.json
rename tests/cputestdata/{x86-cpuid-Core-i5-2540M.xml => x86_64-cpuid-Core-i5-2540M.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-4670T-guest.xml => x86_64-cpuid-Core-i5-4670T-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-4670T-host.xml => x86_64-cpuid-Core-i5-4670T-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-4670T-json.xml => x86_64-cpuid-Core-i5-4670T-json.xml} (95%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i5-4670T.json
rename tests/cputestdata/{x86-cpuid-Core-i5-4670T.xml => x86_64-cpuid-Core-i5-4670T.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-6600-guest.xml => x86_64-cpuid-Core-i5-6600-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-6600-host.xml => x86_64-cpuid-Core-i5-6600-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i5-6600-json.xml => x86_64-cpuid-Core-i5-6600-json.xml} (93%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i5-6600.json
rename tests/cputestdata/{x86-cpuid-Core-i5-6600.xml => x86_64-cpuid-Core-i5-6600.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-2600-guest.xml => x86_64-cpuid-Core-i7-2600-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-2600-host.xml => x86_64-cpuid-Core-i7-2600-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-2600-json.xml => x86_64-cpuid-Core-i7-2600-json.xml} (93%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i7-2600.json
rename tests/cputestdata/{x86-cpuid-Core-i7-2600.xml => x86_64-cpuid-Core-i7-2600.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3520M-guest.xml => x86_64-cpuid-Core-i7-3520M-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3520M-host.xml => x86_64-cpuid-Core-i7-3520M-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3520M.xml => x86_64-cpuid-Core-i7-3520M.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3740QM-guest.xml => x86_64-cpuid-Core-i7-3740QM-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3740QM-host.xml => x86_64-cpuid-Core-i7-3740QM-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3740QM-json.xml => x86_64-cpuid-Core-i7-3740QM-json.xml} (93%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i7-3740QM.json
rename tests/cputestdata/{x86-cpuid-Core-i7-3740QM.xml => x86_64-cpuid-Core-i7-3740QM.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3770-guest.xml => x86_64-cpuid-Core-i7-3770-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3770-host.xml => x86_64-cpuid-Core-i7-3770-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-3770-json.xml => x86_64-cpuid-Core-i7-3770-json.xml} (92%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i7-3770.json
rename tests/cputestdata/{x86-cpuid-Core-i7-3770.xml => x86_64-cpuid-Core-i7-3770.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-4600U-guest.xml => x86_64-cpuid-Core-i7-4600U-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-4600U-host.xml => x86_64-cpuid-Core-i7-4600U-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-4600U-json.xml => x86_64-cpuid-Core-i7-4600U-json.xml} (95%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i7-4600U.json
rename tests/cputestdata/{x86-cpuid-Core-i7-4600U.xml => x86_64-cpuid-Core-i7-4600U.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-5600U-guest.xml => x86_64-cpuid-Core-i7-5600U-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core-i7-5600U-host.xml => x86_64-cpuid-Core-i7-5600U-host.xml} (100%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i7-5600U-json.xml
create mode 100644 tests/cputestdata/x86_64-cpuid-Core-i7-5600U.json
rename tests/cputestdata/{x86-cpuid-Core-i7-5600U.xml => x86_64-cpuid-Core-i7-5600U.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core2-E6850-guest.xml => x86_64-cpuid-Core2-E6850-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core2-E6850-host.xml => x86_64-cpuid-Core2-E6850-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core2-E6850-json.xml => x86_64-cpuid-Core2-E6850-json.xml} (75%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Core2-E6850.json
rename tests/cputestdata/{x86-cpuid-Core2-E6850.xml => x86_64-cpuid-Core2-E6850.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core2-Q9500-guest.xml => x86_64-cpuid-Core2-Q9500-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core2-Q9500-host.xml => x86_64-cpuid-Core2-Q9500-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Core2-Q9500.xml => x86_64-cpuid-Core2-Q9500.xml} (100%)
rename tests/cputestdata/{x86-cpuid-FX-8150-guest.xml => x86_64-cpuid-FX-8150-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-FX-8150-host.xml => x86_64-cpuid-FX-8150-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-FX-8150.xml => x86_64-cpuid-FX-8150.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-1352-guest.xml => x86_64-cpuid-Opteron-1352-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-1352-host.xml => x86_64-cpuid-Opteron-1352-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-1352.xml => x86_64-cpuid-Opteron-1352.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-2350-guest.xml => x86_64-cpuid-Opteron-2350-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-2350-host.xml => x86_64-cpuid-Opteron-2350-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-2350-json.xml => x86_64-cpuid-Opteron-2350-json.xml} (97%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Opteron-2350.json
rename tests/cputestdata/{x86-cpuid-Opteron-2350.xml => x86_64-cpuid-Opteron-2350.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-6234-guest.xml => x86_64-cpuid-Opteron-6234-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-6234-host.xml => x86_64-cpuid-Opteron-6234-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-6234-json.xml => x86_64-cpuid-Opteron-6234-json.xml} (96%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Opteron-6234.json
rename tests/cputestdata/{x86-cpuid-Opteron-6234.xml => x86_64-cpuid-Opteron-6234.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-6282-guest.xml => x86_64-cpuid-Opteron-6282-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-6282-host.xml => x86_64-cpuid-Opteron-6282-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Opteron-6282.xml => x86_64-cpuid-Opteron-6282.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Pentium-P6100-guest.xml => x86_64-cpuid-Pentium-P6100-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Pentium-P6100-host.xml => x86_64-cpuid-Pentium-P6100-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Pentium-P6100.xml => x86_64-cpuid-Pentium-P6100.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Phenom-B95-guest.xml => x86_64-cpuid-Phenom-B95-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Phenom-B95-host.xml => x86_64-cpuid-Phenom-B95-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Phenom-B95-json.xml => x86_64-cpuid-Phenom-B95-json.xml} (97%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Phenom-B95.json
rename tests/cputestdata/{x86-cpuid-Phenom-B95.xml => x86_64-cpuid-Phenom-B95.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-5110-guest.xml => x86_64-cpuid-Xeon-5110-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-5110-host.xml => x86_64-cpuid-Xeon-5110-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-5110.xml => x86_64-cpuid-Xeon-5110.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E3-1245-guest.xml => x86_64-cpuid-Xeon-E3-1245-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E3-1245-host.xml => x86_64-cpuid-Xeon-E3-1245-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E3-1245-json.xml => x86_64-cpuid-Xeon-E3-1245-json.xml} (93%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E3-1245.json
rename tests/cputestdata/{x86-cpuid-Xeon-E3-1245.xml => x86_64-cpuid-Xeon-E3-1245.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2630-guest.xml => x86_64-cpuid-Xeon-E5-2630-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2630-host.xml => x86_64-cpuid-Xeon-E5-2630-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2630-json.xml => x86_64-cpuid-Xeon-E5-2630-json.xml} (95%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E5-2630.json
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2630.xml => x86_64-cpuid-Xeon-E5-2630.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2650-guest.xml => x86_64-cpuid-Xeon-E5-2650-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2650-host.xml => x86_64-cpuid-Xeon-E5-2650-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2650-json.xml => x86_64-cpuid-Xeon-E5-2650-json.xml} (94%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E5-2650.json
rename tests/cputestdata/{x86-cpuid-Xeon-E5-2650.xml => x86_64-cpuid-Xeon-E5-2650.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E7-4820-guest.xml => x86_64-cpuid-Xeon-E7-4820-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E7-4820-host.xml => x86_64-cpuid-Xeon-E7-4820-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-E7-4820-json.xml => x86_64-cpuid-Xeon-E7-4820-json.xml} (94%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-E7-4820.json
rename tests/cputestdata/{x86-cpuid-Xeon-E7-4820.xml => x86_64-cpuid-Xeon-E7-4820.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-W3520-guest.xml => x86_64-cpuid-Xeon-W3520-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-W3520-host.xml => x86_64-cpuid-Xeon-W3520-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-W3520-json.xml => x86_64-cpuid-Xeon-W3520-json.xml} (93%)
create mode 100644 tests/cputestdata/x86_64-cpuid-Xeon-W3520.json
rename tests/cputestdata/{x86-cpuid-Xeon-W3520.xml => x86_64-cpuid-Xeon-W3520.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-X5460-guest.xml => x86_64-cpuid-Xeon-X5460-guest.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-X5460-host.xml => x86_64-cpuid-Xeon-X5460-host.xml} (100%)
rename tests/cputestdata/{x86-cpuid-Xeon-X5460.xml => x86_64-cpuid-Xeon-X5460.xml} (100%)
rename tests/cputestdata/{x86-exact-disable-extra.xml => x86_64-exact-disable-extra.xml} (100%)
rename tests/cputestdata/{x86-exact-disable.xml => x86_64-exact-disable.xml} (100%)
rename tests/cputestdata/{x86-exact-disable2.xml => x86_64-exact-disable2.xml} (100%)
rename tests/cputestdata/{x86-exact-forbid-extra.xml => x86_64-exact-forbid-extra.xml} (100%)
rename tests/cputestdata/{x86-exact-forbid.xml => x86_64-exact-forbid.xml} (100%)
rename tests/cputestdata/{x86-exact-force-Haswell.xml => x86_64-exact-force-Haswell.xml} (100%)
rename tests/cputestdata/{x86-exact-force.xml => x86_64-exact-force.xml} (100%)
rename tests/cputestdata/{x86-exact-require-extra.xml => x86_64-exact-require-extra.xml} (100%)
rename tests/cputestdata/{x86-exact-require.xml => x86_64-exact-require.xml} (100%)
rename tests/cputestdata/{x86-exact.xml => x86_64-exact.xml} (100%)
rename tests/cputestdata/{x86-guest-nofallback.xml => x86_64-guest-nofallback.xml} (100%)
rename tests/cputestdata/{x86-guest.xml => x86_64-guest.xml} (100%)
rename tests/cputestdata/{x86-host+guest,model486-result.xml => x86_64-host+guest,model486-result.xml} (100%)
rename tests/cputestdata/{x86-host+guest,models-result.xml => x86_64-host+guest,models-result.xml} (100%)
rename tests/cputestdata/{x86-host+guest-result.xml => x86_64-host+guest-result.xml} (100%)
rename tests/cputestdata/{x86-host+guest.xml => x86_64-host+guest.xml} (100%)
rename tests/cputestdata/{x86-host+host+host-model,models-result.xml => x86_64-host+host+host-model,models-result.xml} (100%)
rename tests/cputestdata/{x86-host+host-model-nofallback.xml => x86_64-host+host-model-nofallback.xml} (100%)
rename tests/cputestdata/{x86-host+host-model.xml => x86_64-host+host-model.xml} (100%)
rename tests/cputestdata/{x86-host+host-passthrough-features.xml => x86_64-host+host-passthrough-features.xml} (100%)
rename tests/cputestdata/{x86-host+host-passthrough.xml => x86_64-host+host-passthrough.xml} (100%)
rename tests/cputestdata/{x86-host+min.xml => x86_64-host+min.xml} (100%)
rename tests/cputestdata/{x86-host+penryn-force-result.xml => x86_64-host+penryn-force-result.xml} (100%)
rename tests/cputestdata/{x86-host+pentium3.xml => x86_64-host+pentium3.xml} (100%)
rename tests/cputestdata/{x86-host+strict-force-extra-result.xml => x86_64-host+strict-force-extra-result.xml} (100%)
rename tests/cputestdata/{x86-host-Haswell-noTSX+Haswell,haswell-result.xml => x86_64-host-Haswell-noTSX+Haswell,haswell-result.xml} (100%)
rename tests/cputestdata/{x86-host-Haswell-noTSX+Haswell-noTSX,haswell-result.xml => x86_64-host-Haswell-noTSX+Haswell-noTSX,haswell-result.xml} (100%)
rename tests/cputestdata/{x86-host-Haswell-noTSX+Haswell-noTSX-result.xml => x86_64-host-Haswell-noTSX+Haswell-noTSX-result.xml} (100%)
rename tests/cputestdata/{x86-host-Haswell-noTSX.xml => x86_64-host-Haswell-noTSX.xml} (100%)
rename tests/cputestdata/{x86-host-SandyBridge.xml => x86_64-host-SandyBridge.xml} (100%)
rename tests/cputestdata/{x86-host-amd-fake.xml => x86_64-host-amd-fake.xml} (100%)
rename tests/cputestdata/{x86-host-amd.xml => x86_64-host-amd.xml} (100%)
rename tests/cputestdata/{x86-host-better+pentium3-result.xml => x86_64-host-better+pentium3-result.xml} (100%)
rename tests/cputestdata/{x86-host-better.xml => x86_64-host-better.xml} (100%)
rename tests/cputestdata/{x86-host-incomp-arch.xml => x86_64-host-incomp-arch.xml} (100%)
rename tests/cputestdata/{x86-host-invtsc+host-model.xml => x86_64-host-invtsc+host-model.xml} (100%)
rename tests/cputestdata/{x86-host-invtsc.xml => x86_64-host-invtsc.xml} (100%)
rename tests/cputestdata/{x86-host-model-nofallback.xml => x86_64-host-model-nofallback.xml} (100%)
rename tests/cputestdata/{x86-host-model.xml => x86_64-host-model.xml} (100%)
rename tests/cputestdata/{x86-host-no-vendor.xml => x86_64-host-no-vendor.xml} (100%)
rename tests/cputestdata/{x86-host-passthrough-features.xml => x86_64-host-passthrough-features.xml} (100%)
rename tests/cputestdata/{x86-host-passthrough.xml => x86_64-host-passthrough.xml} (100%)
rename tests/cputestdata/{x86-host-worse+guest-result.xml => x86_64-host-worse+guest-result.xml} (100%)
rename tests/cputestdata/{x86-host-worse.xml => x86_64-host-worse.xml} (100%)
rename tests/cputestdata/{x86-host.xml => x86_64-host.xml} (100%)
rename tests/cputestdata/{x86-min.xml => x86_64-min.xml} (100%)
rename tests/cputestdata/{x86-penryn-force.xml => x86_64-penryn-force.xml} (100%)
rename tests/cputestdata/{x86-pentium3-amd.xml => x86_64-pentium3-amd.xml} (100%)
rename tests/cputestdata/{x86-pentium3.xml => x86_64-pentium3.xml} (100%)
rename tests/cputestdata/{x86-strict-disable.xml => x86_64-strict-disable.xml} (100%)
rename tests/cputestdata/{x86-strict-force-extra.xml => x86_64-strict-force-extra.xml} (100%)
rename tests/cputestdata/{x86-strict-full.xml => x86_64-strict-full.xml} (100%)
rename tests/cputestdata/{x86-strict.xml => x86_64-strict.xml} (100%)
create mode 100644 tests/domaincapsschemadata/qemu_2.9.0-tcg.x86_64.xml
create mode 100644 tests/domaincapsschemadata/qemu_2.9.0.x86_64.xml
--
2.11.1
Hey!
Back when channels were introduced in libxl (in answer to Michal[0]), I
suggested the idea of integrating the qemu guest agent (which currently lives
in the qemu driver).
This series is an attempt at pulling the qemu agent out of the qemu driver into
util and then using it in libxl in subsequent patches. What do folks think of
the idea? Note that this is still all very RFC because 1) there's a lot of code
we could potentially share between qemu and libxl with respect to finding the
guest agent config and keeping some of its state (see patch 3); 2) we also need
to ignore "execute" messages echoed back by the guest to be able to query the
agent and see its returned data (patch 2). Despite that commit not being
incorrect, I am not yet sure why we need it.
As a PoC I only implemented domainQemuAgentCommand/domainInterfaceAddresses,
but there are a lot more driver APIs we could potentially introduce after this
(a protocol-level sketch follows the list below). Tracing all driver APIs that
might require a guest agent:
* domainFSThaw, domainFSFreeze, domainFSTrim, domainGetFSInfo
* domainSetUserPassword
* domainGetTime, domainSetTime
* domainShutdown (with VIR_DOMAIN_SHUTDOWN_GUEST_AGENT)
* domainReboot (with VIR_DOMAIN_REBOOT_GUEST_AGENT)
* domainGetGuestVcpus
* domainSetGuestVcpus
* domainSetVcpusFlags (with VIR_DOMAIN_VCPU_GUEST)
* domainGetVcpusFlags (with VIR_DOMAIN_VCPU_GUEST)
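For reference, here is a minimal, standalone sketch of what the shared agent
code has to speak on the wire: newline-delimited JSON over the domain's
guest-agent channel. The socket path and the raw read/write below are purely
illustrative assumptions and not the util API these patches move:

```c
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* Illustrative only: the channel path depends on how the domain's
 * guest-agent channel is configured. */
#define AGENT_SOCK "/var/lib/libvirt/qemu/channel/target/guest-agent.sock"

int main(void)
{
    struct sockaddr_un addr = { .sun_family = AF_UNIX };
    const char *cmd = "{\"execute\":\"guest-network-get-interfaces\"}\n";
    char buf[8192];
    ssize_t n;
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);

    if (fd < 0)
        return 1;
    strncpy(addr.sun_path, AGENT_SOCK, sizeof(addr.sun_path) - 1);
    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
        return 1;

    /* Commands are single-line JSON objects; replies come back the
     * same way, which is also where echoed "execute" lines (patch 2)
     * would have to be filtered out. */
    if (write(fd, cmd, strlen(cmd)) < 0)
        return 1;

    n = read(fd, buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        printf("%s\n", buf);   /* {"return": [ ...interfaces... ]} */
    }

    close(fd);
    return 0;
}
```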
Comments/Feedback is appreciated :)
Cheers,
Joao
[0] https://www.spinics.net/linux/fedora/libvir/msg136685.html
Joao Martins (4):
qemu_agent: move agent into util
qemu_agent: ignore requests echoed back by guest
libxl: implement qemu-agent-command
libxl: domainInterfaceAddresses agent support
po/POTFILES.in | 2 +-
src/Makefile.am | 2 +-
src/libvirt_private.syms | 21 +
src/libxl/libxl_domain.c | 239 ++++-
src/libxl/libxl_domain.h | 16 +
src/libxl/libxl_driver.c | 69 ++
src/qemu/qemu_agent.c | 2248 -----------------------------------------
src/qemu/qemu_agent.h | 123 ---
src/qemu/qemu_domain.h | 2 +-
src/qemu/qemu_driver.c | 2 +-
src/util/virqemuagent.c | 2249 ++++++++++++++++++++++++++++++++++++++++++
src/util/virqemuagent.h | 123 +++
tests/qemuagenttest.c | 2 +-
tests/qemumonitortestutils.c | 2 +-
tests/qemumonitortestutils.h | 2 +-
15 files changed, 2723 insertions(+), 2379 deletions(-)
delete mode 100644 src/qemu/qemu_agent.c
delete mode 100644 src/qemu/qemu_agent.h
create mode 100644 src/util/virqemuagent.c
create mode 100644 src/util/virqemuagent.h
--
2.1.4
From: Derbyshev Dmitry <dderbyshev(a)virtuozzo.com>
This provides about a 20% boost on a local machine with 35 VMs.
virEventPollDispatchHandles could also be split so that the callback is
passed via the epoll data field.
Should I start sending this as PATCH instead?
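As a rough sketch of that direction (a self-contained toy loop, not libvirt's
actual virEventPoll code; the handle struct and callback type are made up for
illustration), carrying the callback in the epoll data field looks like this:

```c
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

typedef void (*handleCallback)(int fd, int events, void *opaque);

/* Illustrative handle record; not libvirt's actual structure. */
struct handle {
    int fd;
    handleCallback cb;
    void *opaque;
};

static int
addHandle(int epfd, struct handle *h)
{
    /* Store the handle pointer in the epoll data field so dispatch
     * does not need to scan the whole handle table. */
    struct epoll_event ev = { .events = EPOLLIN, .data.ptr = h };
    return epoll_ctl(epfd, EPOLL_CTL_ADD, h->fd, &ev);
}

static int
dispatchOnce(int epfd)
{
    struct epoll_event ev[16];
    int n = epoll_wait(epfd, ev, 16, -1);

    for (int i = 0; i < n; i++) {
        struct handle *h = ev[i].data.ptr;
        h->cb(h->fd, ev[i].events, h->opaque);
    }
    return n < 0 ? -1 : 0;
}

static void
echoCb(int fd, int events, void *opaque)
{
    char buf[64];
    ssize_t got = read(fd, buf, sizeof(buf));
    if (got > 0)
        printf("callback '%s' got %zd bytes\n", (const char *)opaque, got);
    (void)events;
}

int main(void)
{
    int pipefd[2];
    int epfd = epoll_create1(0);
    struct handle h;

    if (epfd < 0 || pipe(pipefd) < 0)
        return 1;

    h.fd = pipefd[0];
    h.cb = echoCb;
    h.opaque = (void *)"demo";

    if (addHandle(epfd, &h) < 0)
        return 1;
    if (write(pipefd[1], "x", 1) < 0)
        return 1;
    return dispatchOnce(epfd) < 0;
}
```

The point of the data-field trick is that dispatch cost no longer grows with
the number of registered handles, which is where the reported ~20% win with
many VMs would come from.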
Changes since v1:
* ifdef substituted by 2 .c files with vireventpollinternal.h
implementations
* PROBE purged
Derbyshev Dmitry (2):
vireventpoll: isolate common code
vireventpoll implementation using epoll
configure.ac | 28 +
src/Makefile.am | 12 +-
src/util/vireventepoll.c | 201 +++++++
src/util/vireventpoll.c | 700 ++--------------------
src/util/{vireventpoll.c => vireventpollcommon.c} | 231 ++-----
src/util/vireventpollinternal.h | 91 +++
tests/commanddata/{test14.log => test3epoll.log} | 2 +
tests/commandtest.c | 4 +
8 files changed, 451 insertions(+), 818 deletions(-)
create mode 100644 src/util/vireventepoll.c
copy src/util/{vireventpoll.c => vireventpollcommon.c} (78%)
create mode 100644 src/util/vireventpollinternal.h
copy tests/commanddata/{test14.log => test3epoll.log} (94%)
--
1.9.5.msysgit.0
Hello,
This patch allows 32-bit ARM containers on aarch64 hosts. The exact 32-bit
personality for aarch64 is aarch32, but there is no such arch in libvirt, so we
use armv7l instead.
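As a rough illustration of the mechanism involved (not the patch itself, which
touches the virArch checks in src/lxc/lxc_container.c), switching to a 32-bit
syscall personality boils down to a personality(2) call before exec'ing the
container init; the string-based arch check below is a simplified assumption:

```c
#include <stdio.h>
#include <string.h>
#include <sys/personality.h>
#include <sys/utsname.h>

/* Simplified sketch: request the 32-bit syscall personality when a
 * 32-bit ARM container is to be started on an aarch64 host. */
static int
setContainerPersonality(const char *containerArch)
{
    struct utsname uts;

    if (uname(&uts) < 0)
        return -1;

    if (strcmp(uts.machine, "aarch64") == 0 &&
        strcmp(containerArch, "armv7l") == 0 &&
        personality(PER_LINUX32) < 0)
        return -1;

    return 0;
}

int main(void)
{
    if (setContainerPersonality("armv7l") < 0) {
        fprintf(stderr, "failed to set 32-bit personality\n");
        return 1;
    }
    /* Any exec'd container init now sees a 32-bit uname/syscall view
     * on hosts where the switch actually happened. */
    return 0;
}
```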
Matwey V. Kornilov (1):
Add aarch64 to virArch
src/lxc/lxc_container.c | 2 ++
1 file changed, 2 insertions(+)
--
2.1.4
v1: http://www.redhat.com/archives/libvir-list/2017-February/msg00897.html
v1 cover letter reiterated:
Patches 1, 3 -> 9 are primarily quite a bit of code motion in order to allow
reuse of the "core" of the chardev TLS code.
Theoretically speaking of course, these patches should work - I don't
have a TLS and migration environment to test with, so this relies on
following the qemu command model on Daniel's blog and prior experience
with the chardev TLS work.
I added the saving of a flag to the private qemu domain state, although
I'm not 100% sure it was necessary. At one time I created the source TLS
objects during the Begin phase, but later decided to wait until just
before the migration is run. I think the main reason to have the flag
would be a restart of libvirtd to let 'something' know migration using
TLS was configured. I think it may only be "necessary" in order to
repopulate the migSecinfo after libvirtd restart, but it's not entirely
clear. By the time I started thinking more about it while writing this cover
letter, it was too late to just remove it.
Also rather than create the destination host TLS objects on the fly,
I modified the command line generation. That model could change to adding
the TLS objects once the destination is started and before the params are
set for the migration.
This 'model' is also going to be used for the NBD, but I figured I'd get
this posted now since it was already too long of a series.
v2: Changes
Reorder the patches to put the reused 'chardev' code up front. Most of
these patches were "ok" along the way, but only one was officially ACK'd
(and that was pushed).
Patch1 is new - based off code review comment to create a common New
function for secinfo allocation
Patch2 is adjusted to use Patch1
Patch3 is new based on review comment and having ExitMonitor outside
the virSaveLastError ... virSetError
Patch4 mainly follows older logic with adjustments as suggested during
code review
Patch5 -> Patch8 had minor changes as a result of other suggestions
Patch9 just removed the _set logic
Patch10 fixed the order/placement of VIR_MIGRATE_TLS
Patch11 is the old patch1 w/ the fixed #undef
Patch12 is the old patch2 w/o changes
Patch13 alters the server logic to create the objects on the fly rather
than via the command line. It also introduces 3 helpers to perform the
migration TLS manipulation
Patch14 similarly uses those APIs
AFAIU - removal of the objects would remove the migration tls-creds,
tls-hostname settings.
NB:
I left the cfg->migrateTLS in for now - it's very simple to remove, but
there would still need to be something to key off to ensure the migrateTLS
environment has been properly configured, since removing it would mean the
default environment would need to be used/configured. Setting up the default
environment is keyed off having migrateTLS defined. That's all part
of the qemu_conf reading logic.
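For completeness, a hedged sketch of how a client would request a
TLS-protected migration once the flag from patch 10 exists; VIR_MIGRATE_TLS is
what this series proposes, while everything else below uses the existing
public API:

```c
#include <libvirt/libvirt.h>
#include <stdio.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom;
    /* VIR_MIGRATE_TLS is the flag proposed in patch 10; combine it
     * with the usual live-migration flags. */
    unsigned long flags = VIR_MIGRATE_LIVE | VIR_MIGRATE_PEER2PEER |
                          VIR_MIGRATE_TLS;

    if (!conn)
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo"))) {
        virConnectClose(conn);
        return 1;
    }

    /* The migration stream between the two QEMU processes would then
     * be secured with the certificates found via the migrate TLS
     * cert dir introduced in patch 12 (or the default TLS setup). */
    if (virDomainMigrateToURI(dom, "qemu+tls://dst.example.org/system",
                              flags, NULL, 0) < 0)
        fprintf(stderr, "migration failed\n");

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}
```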
John Ferlan (14):
qemu: Introduce qemuDomainSecretInfoNew
qemu: Introduce qemuDomainSecretMigratePrepare
qemu: Move exit monitor calls in failure paths
qemu: Refactor hotplug to introduce qemuDomain{Add|Del}TLSObjects
qemu: Refactor qemuDomainGetChardevTLSObjects to converge code
qemu: Move qemuDomainSecretChardevPrepare call
qemu: Move qemuDomainPrepareChardevSourceTLS call
qemu: Introduce qemuDomainGetTLSObjects
qemu: Add TLS params to _qemuMonitorMigrationParams
Add new migration flag VIR_MIGRATE_TLS
qemu: Create #define for TLS configuration setup.
conf: Introduce migrate_tls_x509_cert_dir
qemu: Set up the migrate TLS objects for target
qemu: Set up the migration TLS objects for source
include/libvirt/libvirt-domain.h | 8 +
src/qemu/libvirtd_qemu.aug | 6 +
src/qemu/qemu.conf | 39 +++++
src/qemu/qemu_conf.c | 45 +++--
src/qemu/qemu_conf.h | 5 +
src/qemu/qemu_domain.c | 195 +++++++++++++--------
src/qemu/qemu_domain.h | 89 ++++++----
src/qemu/qemu_hotplug.c | 343 ++++++++++++++++++++-----------------
src/qemu/qemu_hotplug.h | 24 +++
src/qemu/qemu_migration.c | 200 +++++++++++++++++++++
src/qemu/qemu_migration.h | 3 +-
src/qemu/qemu_monitor.c | 11 +-
src/qemu/qemu_monitor.h | 3 +
src/qemu/qemu_monitor_json.c | 10 ++
src/qemu/test_libvirtd_qemu.aug.in | 4 +
tools/virsh-domain.c | 7 +
16 files changed, 705 insertions(+), 287 deletions(-)
--
2.9.3