[libvirt] [PATCH v2] snapshot: Store both config and live XML in the snapshot domain
by Maxiwell S. Garcia
The snapshot-create operation on a running guest saves only the live
XML, which is used to replace both the active and inactive domain
definitions on revert; the config XML is ignored by the snapshot
process. This commit changes that by also storing the config XML in
the snapshot XML, as a new <inactiveDomain> entry.
For an offline guest the behavior is unchanged: the config XML is
saved in the snapshot XML as the <domain> entry. Older snapshots of
running guests that lack the new <inactiveDomain> entry also behave
as before: a revert overwrites both the active and inactive domains
with the <domain> entry. The <inactiveDomain> entry is therefore not
required for snapshots to work, but it is useful for preserving the
config XML of running guests.
Signed-off-by: Maxiwell S. Garcia <maxiwell@linux.ibm.com>
---
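For illustration only (this example is not part of the patch; the
<domain> and <inactiveDomain> element names match the code below, the
rest of the snapshot XML is elided), a snapshot of a running guest
would now carry both definitions:

    <domainsnapshot>
      <name>s1</name>
      ...
      <domain type='qemu'>
        <!-- live XML saved at snapshot time -->
      </domain>
      <inactiveDomain type='qemu'>
        <!-- config XML saved at snapshot time -->
      </inactiveDomain>
    </domainsnapshot>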
src/conf/moment_conf.c | 1 +
src/conf/moment_conf.h | 11 +++++++++
src/conf/snapshot_conf.c | 48 +++++++++++++++++++++++++++++++++++-----
src/qemu/qemu_driver.c | 27 +++++++++++++++++-----
src/util/virxml.c | 45 +++++++++++++++++++++++++++++++++++++
src/util/virxml.h | 8 +++++++
6 files changed, 130 insertions(+), 10 deletions(-)
diff --git a/src/conf/moment_conf.c b/src/conf/moment_conf.c
index fea13f0f97..f54a44b33e 100644
--- a/src/conf/moment_conf.c
+++ b/src/conf/moment_conf.c
@@ -66,6 +66,7 @@ virDomainMomentDefDispose(void *obj)
VIR_FREE(def->description);
VIR_FREE(def->parent_name);
virDomainDefFree(def->dom);
+ virDomainDefFree(def->inactiveDom);
}
/* Provide defaults for creation time and moment name after parsing XML */
diff --git a/src/conf/moment_conf.h b/src/conf/moment_conf.h
index 9fdbef2172..70cc47bd70 100644
--- a/src/conf/moment_conf.h
+++ b/src/conf/moment_conf.h
@@ -36,7 +36,18 @@ struct _virDomainMomentDef {
char *parent_name;
long long creationTime; /* in seconds */
+ /*
+ * Store the active domain definition for an online
+ * guest, or the inactive domain definition for an
+ * offline guest.
+ */
virDomainDefPtr dom;
+
+ /*
+ * Store the inactive domain definition for an online
+ * guest; leave it NULL for an offline guest.
+ */
+ virDomainDefPtr inactiveDom;
};
virClassPtr virClassForDomainMomentDef(void);
diff --git a/src/conf/snapshot_conf.c b/src/conf/snapshot_conf.c
index 324901a560..8aeac9ab20 100644
--- a/src/conf/snapshot_conf.c
+++ b/src/conf/snapshot_conf.c
@@ -243,6 +243,8 @@ virDomainSnapshotDefParse(xmlXPathContextPtr ctxt,
char *memoryFile = NULL;
bool offline = !!(flags & VIR_DOMAIN_SNAPSHOT_PARSE_OFFLINE);
virSaveCookieCallbacksPtr saveCookie = virDomainXMLOptionGetSaveCookie(xmlopt);
+ int domainflags = VIR_DOMAIN_DEF_PARSE_INACTIVE |
+ VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE;
if (!(def = virDomainSnapshotDefNew()))
return NULL;
@@ -292,8 +294,6 @@ virDomainSnapshotDefParse(xmlXPathContextPtr ctxt,
* clients will have to decide between best effort
* initialization or outright failure. */
if ((tmp = virXPathString("string(./domain/@type)", ctxt))) {
- int domainflags = VIR_DOMAIN_DEF_PARSE_INACTIVE |
- VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE;
xmlNodePtr domainNode = virXPathNode("./domain", ctxt);
VIR_FREE(tmp);
@@ -309,6 +309,20 @@ virDomainSnapshotDefParse(xmlXPathContextPtr ctxt,
} else {
VIR_WARN("parsing older snapshot that lacks domain");
}
+
+ /* The /inactiveDomain entry saves the config XML of a running
+ * VM. If absent, leave parent.inactiveDom NULL and use
+ * parent.dom for both config and live XML. */
+ if (virXPathString("string(./inactiveDomain/@type)", ctxt)) {
+ xmlNodePtr domainNode = virXPathNode("./inactiveDomain", ctxt);
+
+ if (domainNode) {
+ def->parent.inactiveDom = virDomainDefParseNode(ctxt->node->doc, domainNode,
+ caps, xmlopt, NULL, domainflags);
+ if (!def->parent.inactiveDom)
+ goto cleanup;
+ }
+ }
} else if (virDomainXMLOptionRunMomentPostParse(xmlopt, &def->parent) < 0) {
goto cleanup;
}
@@ -845,6 +859,10 @@ virDomainSnapshotDefFormatInternal(virBufferPtr buf,
{
size_t i;
int domainflags = VIR_DOMAIN_DEF_FORMAT_INACTIVE;
+ virBuffer inactivedom_buf = VIR_BUFFER_INITIALIZER;
+ xmlXPathContextPtr inactivedom_ctxt = NULL;
+ char *inactivedom_str = NULL;
+ int ret = -1;
if (flags & VIR_DOMAIN_SNAPSHOT_FORMAT_SECURE)
domainflags |= VIR_DOMAIN_DEF_FORMAT_SECURE;
@@ -903,6 +921,20 @@ virDomainSnapshotDefFormatInternal(virBufferPtr buf,
virBufferAddLit(buf, "</domain>\n");
}
+ if (def->parent.inactiveDom) {
+ if (virDomainDefFormatInternal(def->parent.inactiveDom, caps,
+ domainflags, &inactivedom_buf, xmlopt) < 0)
+ goto error;
+
+ inactivedom_ctxt = virXPathBuildContext(&inactivedom_buf);
+ if (!(inactivedom_str = virXPathRenameNode("/domain", "inactiveDomain",
+ inactivedom_ctxt)))
+ goto error;
+
+ virBufferAddStr(buf, inactivedom_str);
+ virBufferAddLit(buf, "\n");
+ }
+
if (virSaveCookieFormatBuf(buf, def->cookie,
virDomainXMLOptionGetSaveCookie(xmlopt)) < 0)
goto error;
@@ -917,11 +949,17 @@ virDomainSnapshotDefFormatInternal(virBufferPtr buf,
if (virBufferCheckError(buf) < 0)
goto error;
- return 0;
+ ret = 0;
error:
- virBufferFreeAndReset(buf);
- return -1;
+ VIR_FREE(inactivedom_str);
+ xmlXPathFreeContext(inactivedom_ctxt);
+ virBufferFreeAndReset(&inactivedom_buf);
+
+ if (ret < 0)
+ virBufferFreeAndReset(buf);
+
+ return ret;
}
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 482f915b67..9b95e9b766 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -15697,6 +15697,13 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
goto endjob;
+ if (vm->newDef) {
+ def->parent.inactiveDom = virDomainDefCopy(vm->newDef, caps,
+ driver->xmlopt, NULL, true);
+ if (!def->parent.inactiveDom)
+ goto endjob;
+ }
+
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) {
align_location = VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL;
align_match = false;
@@ -16231,6 +16238,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
qemuDomainObjPrivatePtr priv;
int rc;
virDomainDefPtr config = NULL;
+ virDomainDefPtr inactiveConfig = NULL;
virQEMUDriverConfigPtr cfg = NULL;
virCapsPtr caps = NULL;
bool was_stopped = false;
@@ -16331,17 +16339,22 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
* in the failure cases where we know there was no change? */
}
- /* Prepare to copy the snapshot inactive xml as the config of this
- * domain.
- *
- * XXX Should domain snapshots track live xml rather
- * than inactive xml? */
+ /* Prepare to copy the snapshot inactive domain as the config XML
+ * and the snapshot domain as the live XML. If the inactive domain is
+ * NULL, both config and live XML are copied from the snapshot domain.
+ */
if (snap->def->dom) {
config = virDomainDefCopy(snap->def->dom, caps,
driver->xmlopt, NULL, true);
if (!config)
goto endjob;
}
+ if (snap->def->inactiveDom) {
+ inactiveConfig = virDomainDefCopy(snap->def->inactiveDom, caps,
+ driver->xmlopt, NULL, true);
+ if (!inactiveConfig)
+ goto endjob;
+ }
cookie = (qemuDomainSaveCookiePtr) snapdef->cookie;
@@ -16592,6 +16605,10 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
goto endjob;
}
+ if (inactiveConfig) {
+ virDomainDefFree(vm->newDef);
+ VIR_STEAL_PTR(vm->newDef, inactiveConfig);
+ }
ret = 0;
endjob:
diff --git a/src/util/virxml.c b/src/util/virxml.c
index f55b9a362c..756c0eedbc 100644
--- a/src/util/virxml.c
+++ b/src/util/virxml.c
@@ -1408,3 +1408,48 @@ virXPathContextNodeRestore(virXPathContextNodeSavePtr save)
save->ctxt->node = save->node;
}
+
+
+/**
+ * virXPathBuildContext: convert a buffer holding an XML
+ * document into an XPath context ptr. The caller has to free the ptr.
+ */
+xmlXPathContextPtr
+virXPathBuildContext(virBufferPtr root)
+{
+ xmlDocPtr doc;
+
+ if (!root)
+ return NULL;
+
+ doc = virXMLParseString(virBufferCurrentContent(root), NULL);
+ if (!doc)
+ return NULL;
+
+ return xmlXPathNewContext(doc);
+}
+
+
+/**
+ * virXPathRenameNode: find the XML node matching 'xpath' and
+ * rename it to the 'newname' string.
+ *
+ * Returns the XML string of the node found by 'xpath' or NULL
+ * on error. The caller has to free the string.
+ */
+char *
+virXPathRenameNode(const char *xpath,
+ const char *newname,
+ xmlXPathContextPtr ctxt)
+{
+ xmlNodePtr node;
+
+ if (!xpath || !newname || !ctxt)
+ return NULL;
+
+ if (!(node = virXPathNode(xpath, ctxt)))
+ return NULL;
+
+ xmlNodeSetName(node, (xmlChar *) newname);
+ return virXMLNodeToString(node->doc, node);
+}
diff --git a/src/util/virxml.h b/src/util/virxml.h
index 6208977dd1..48a507c3c1 100644
--- a/src/util/virxml.h
+++ b/src/util/virxml.h
@@ -220,6 +220,14 @@ virXMLFormatElement(virBufferPtr buf,
virBufferPtr childBuf)
ATTRIBUTE_RETURN_CHECK;
+xmlXPathContextPtr
+virXPathBuildContext(virBufferPtr root);
+
+char *
+virXPathRenameNode(const char *xpath,
+ const char *newname,
+ xmlXPathContextPtr ctxt);
+
struct _virXPathContextNodeSave {
xmlXPathContextPtr ctxt;
xmlNodePtr node;
--
2.20.1
[libvirt] [dockerfiles PATCH v2 0/4] Add libosinfo Dockerfiles
by Fabiano Fidêncio
This patch series aims to add libosinfo Dockerfiles to this project so
that the containers can be used by the libosinfo projects' GitLab CI.
Please take a look at each patch for more context on what has been done.
Changes since v1:
https://www.redhat.com/archives/libvir-list/2019-July/msg01296.html
- Update libosinfo Dockerfiles after having libvirt-jenkins-ci's patches
related to libosinfo/osinfo-db-tools dependencies merged.
Fabiano Fidêncio (4):
refresh: Fix typo: ingores -> ignored
refresh: Learn how to deal with the project's name
refresh: Add libosinfo project
Add Dockerfiles for libosinfo project
buildenv-libosinfo-centos-7.Dockerfile | 43 +++++++++++++
...bosinfo-debian-10-cross-aarch64.Dockerfile | 64 +++++++++++++++++++
...ibosinfo-debian-10-cross-armv6l.Dockerfile | 64 +++++++++++++++++++
...ibosinfo-debian-10-cross-armv7l.Dockerfile | 64 +++++++++++++++++++
...-libosinfo-debian-10-cross-i686.Dockerfile | 64 +++++++++++++++++++
...-libosinfo-debian-10-cross-mips.Dockerfile | 64 +++++++++++++++++++
...osinfo-debian-10-cross-mips64el.Dockerfile | 64 +++++++++++++++++++
...ibosinfo-debian-10-cross-mipsel.Dockerfile | 64 +++++++++++++++++++
...bosinfo-debian-10-cross-ppc64le.Dockerfile | 64 +++++++++++++++++++
...libosinfo-debian-10-cross-s390x.Dockerfile | 64 +++++++++++++++++++
buildenv-libosinfo-debian-10.Dockerfile | 50 +++++++++++++++
...ibosinfo-debian-9-cross-aarch64.Dockerfile | 64 +++++++++++++++++++
...libosinfo-debian-9-cross-armv6l.Dockerfile | 64 +++++++++++++++++++
...libosinfo-debian-9-cross-armv7l.Dockerfile | 64 +++++++++++++++++++
...v-libosinfo-debian-9-cross-mips.Dockerfile | 64 +++++++++++++++++++
...bosinfo-debian-9-cross-mips64el.Dockerfile | 64 +++++++++++++++++++
...libosinfo-debian-9-cross-mipsel.Dockerfile | 64 +++++++++++++++++++
...ibosinfo-debian-9-cross-ppc64le.Dockerfile | 64 +++++++++++++++++++
...-libosinfo-debian-9-cross-s390x.Dockerfile | 64 +++++++++++++++++++
buildenv-libosinfo-debian-9.Dockerfile | 50 +++++++++++++++
...osinfo-debian-sid-cross-aarch64.Dockerfile | 64 +++++++++++++++++++
...bosinfo-debian-sid-cross-armv6l.Dockerfile | 64 +++++++++++++++++++
...bosinfo-debian-sid-cross-armv7l.Dockerfile | 64 +++++++++++++++++++
...libosinfo-debian-sid-cross-i686.Dockerfile | 64 +++++++++++++++++++
...libosinfo-debian-sid-cross-mips.Dockerfile | 64 +++++++++++++++++++
...sinfo-debian-sid-cross-mips64el.Dockerfile | 64 +++++++++++++++++++
...bosinfo-debian-sid-cross-mipsel.Dockerfile | 64 +++++++++++++++++++
...osinfo-debian-sid-cross-ppc64le.Dockerfile | 64 +++++++++++++++++++
...ibosinfo-debian-sid-cross-s390x.Dockerfile | 64 +++++++++++++++++++
buildenv-libosinfo-debian-sid.Dockerfile | 50 +++++++++++++++
buildenv-libosinfo-fedora-29.Dockerfile | 49 ++++++++++++++
buildenv-libosinfo-fedora-30.Dockerfile | 49 ++++++++++++++
buildenv-libosinfo-fedora-rawhide.Dockerfile | 63 ++++++++++++++++++
buildenv-libosinfo-ubuntu-16.Dockerfile | 50 +++++++++++++++
buildenv-libosinfo-ubuntu-18.Dockerfile | 50 +++++++++++++++
...le => buildenv-libvirt-centos-7.Dockerfile | 0
...libvirt-debian-10-cross-aarch64.Dockerfile | 0
...-libvirt-debian-10-cross-armv6l.Dockerfile | 0
...-libvirt-debian-10-cross-armv7l.Dockerfile | 0
...nv-libvirt-debian-10-cross-i686.Dockerfile | 0
...nv-libvirt-debian-10-cross-mips.Dockerfile | 0
...ibvirt-debian-10-cross-mips64el.Dockerfile | 0
...-libvirt-debian-10-cross-mipsel.Dockerfile | 0
...libvirt-debian-10-cross-ppc64le.Dockerfile | 0
...v-libvirt-debian-10-cross-s390x.Dockerfile | 0
...e => buildenv-libvirt-debian-10.Dockerfile | 0
...-libvirt-debian-9-cross-aarch64.Dockerfile | 0
...v-libvirt-debian-9-cross-armv6l.Dockerfile | 0
...v-libvirt-debian-9-cross-armv7l.Dockerfile | 0
...env-libvirt-debian-9-cross-mips.Dockerfile | 0
...libvirt-debian-9-cross-mips64el.Dockerfile | 0
...v-libvirt-debian-9-cross-mipsel.Dockerfile | 0
...-libvirt-debian-9-cross-ppc64le.Dockerfile | 0
...nv-libvirt-debian-9-cross-s390x.Dockerfile | 0
...le => buildenv-libvirt-debian-9.Dockerfile | 0
...ibvirt-debian-sid-cross-aarch64.Dockerfile | 0
...libvirt-debian-sid-cross-armv6l.Dockerfile | 0
...libvirt-debian-sid-cross-armv7l.Dockerfile | 0
...v-libvirt-debian-sid-cross-i686.Dockerfile | 0
...v-libvirt-debian-sid-cross-mips.Dockerfile | 0
...bvirt-debian-sid-cross-mips64el.Dockerfile | 0
...libvirt-debian-sid-cross-mipsel.Dockerfile | 0
...ibvirt-debian-sid-cross-ppc64le.Dockerfile | 0
...-libvirt-debian-sid-cross-s390x.Dockerfile | 0
... => buildenv-libvirt-debian-sid.Dockerfile | 0
...e => buildenv-libvirt-fedora-29.Dockerfile | 0
...e => buildenv-libvirt-fedora-30.Dockerfile | 0
...buildenv-libvirt-fedora-rawhide.Dockerfile | 0
...e => buildenv-libvirt-ubuntu-16.Dockerfile | 0
...e => buildenv-libvirt-ubuntu-18.Dockerfile | 0
refresh | 60 +++++++++++++----
71 files changed, 2166 insertions(+), 12 deletions(-)
create mode 100644 buildenv-libosinfo-centos-7.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-aarch64.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-armv6l.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-armv7l.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-i686.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-mips.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-mips64el.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-mipsel.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-ppc64le.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10-cross-s390x.Dockerfile
create mode 100644 buildenv-libosinfo-debian-10.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-aarch64.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-armv6l.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-armv7l.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-mips.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-mips64el.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-mipsel.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-ppc64le.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9-cross-s390x.Dockerfile
create mode 100644 buildenv-libosinfo-debian-9.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-aarch64.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-armv6l.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-armv7l.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-i686.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-mips.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-mips64el.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-mipsel.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-ppc64le.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid-cross-s390x.Dockerfile
create mode 100644 buildenv-libosinfo-debian-sid.Dockerfile
create mode 100644 buildenv-libosinfo-fedora-29.Dockerfile
create mode 100644 buildenv-libosinfo-fedora-30.Dockerfile
create mode 100644 buildenv-libosinfo-fedora-rawhide.Dockerfile
create mode 100644 buildenv-libosinfo-ubuntu-16.Dockerfile
create mode 100644 buildenv-libosinfo-ubuntu-18.Dockerfile
rename buildenv-centos-7.Dockerfile => buildenv-libvirt-centos-7.Dockerfile (100%)
rename buildenv-debian-10-cross-aarch64.Dockerfile => buildenv-libvirt-debian-10-cross-aarch64.Dockerfile (100%)
rename buildenv-debian-10-cross-armv6l.Dockerfile => buildenv-libvirt-debian-10-cross-armv6l.Dockerfile (100%)
rename buildenv-debian-10-cross-armv7l.Dockerfile => buildenv-libvirt-debian-10-cross-armv7l.Dockerfile (100%)
rename buildenv-debian-10-cross-i686.Dockerfile => buildenv-libvirt-debian-10-cross-i686.Dockerfile (100%)
rename buildenv-debian-10-cross-mips.Dockerfile => buildenv-libvirt-debian-10-cross-mips.Dockerfile (100%)
rename buildenv-debian-10-cross-mips64el.Dockerfile => buildenv-libvirt-debian-10-cross-mips64el.Dockerfile (100%)
rename buildenv-debian-10-cross-mipsel.Dockerfile => buildenv-libvirt-debian-10-cross-mipsel.Dockerfile (100%)
rename buildenv-debian-10-cross-ppc64le.Dockerfile => buildenv-libvirt-debian-10-cross-ppc64le.Dockerfile (100%)
rename buildenv-debian-10-cross-s390x.Dockerfile => buildenv-libvirt-debian-10-cross-s390x.Dockerfile (100%)
rename buildenv-debian-10.Dockerfile => buildenv-libvirt-debian-10.Dockerfile (100%)
rename buildenv-debian-9-cross-aarch64.Dockerfile => buildenv-libvirt-debian-9-cross-aarch64.Dockerfile (100%)
rename buildenv-debian-9-cross-armv6l.Dockerfile => buildenv-libvirt-debian-9-cross-armv6l.Dockerfile (100%)
rename buildenv-debian-9-cross-armv7l.Dockerfile => buildenv-libvirt-debian-9-cross-armv7l.Dockerfile (100%)
rename buildenv-debian-9-cross-mips.Dockerfile => buildenv-libvirt-debian-9-cross-mips.Dockerfile (100%)
rename buildenv-debian-9-cross-mips64el.Dockerfile => buildenv-libvirt-debian-9-cross-mips64el.Dockerfile (100%)
rename buildenv-debian-9-cross-mipsel.Dockerfile => buildenv-libvirt-debian-9-cross-mipsel.Dockerfile (100%)
rename buildenv-debian-9-cross-ppc64le.Dockerfile => buildenv-libvirt-debian-9-cross-ppc64le.Dockerfile (100%)
rename buildenv-debian-9-cross-s390x.Dockerfile => buildenv-libvirt-debian-9-cross-s390x.Dockerfile (100%)
rename buildenv-debian-9.Dockerfile => buildenv-libvirt-debian-9.Dockerfile (100%)
rename buildenv-debian-sid-cross-aarch64.Dockerfile => buildenv-libvirt-debian-sid-cross-aarch64.Dockerfile (100%)
rename buildenv-debian-sid-cross-armv6l.Dockerfile => buildenv-libvirt-debian-sid-cross-armv6l.Dockerfile (100%)
rename buildenv-debian-sid-cross-armv7l.Dockerfile => buildenv-libvirt-debian-sid-cross-armv7l.Dockerfile (100%)
rename buildenv-debian-sid-cross-i686.Dockerfile => buildenv-libvirt-debian-sid-cross-i686.Dockerfile (100%)
rename buildenv-debian-sid-cross-mips.Dockerfile => buildenv-libvirt-debian-sid-cross-mips.Dockerfile (100%)
rename buildenv-debian-sid-cross-mips64el.Dockerfile => buildenv-libvirt-debian-sid-cross-mips64el.Dockerfile (100%)
rename buildenv-debian-sid-cross-mipsel.Dockerfile => buildenv-libvirt-debian-sid-cross-mipsel.Dockerfile (100%)
rename buildenv-debian-sid-cross-ppc64le.Dockerfile => buildenv-libvirt-debian-sid-cross-ppc64le.Dockerfile (100%)
rename buildenv-debian-sid-cross-s390x.Dockerfile => buildenv-libvirt-debian-sid-cross-s390x.Dockerfile (100%)
rename buildenv-debian-sid.Dockerfile => buildenv-libvirt-debian-sid.Dockerfile (100%)
rename buildenv-fedora-29.Dockerfile => buildenv-libvirt-fedora-29.Dockerfile (100%)
rename buildenv-fedora-30.Dockerfile => buildenv-libvirt-fedora-30.Dockerfile (100%)
rename buildenv-fedora-rawhide.Dockerfile => buildenv-libvirt-fedora-rawhide.Dockerfile (100%)
rename buildenv-ubuntu-16.Dockerfile => buildenv-libvirt-ubuntu-16.Dockerfile (100%)
rename buildenv-ubuntu-18.Dockerfile => buildenv-libvirt-ubuntu-18.Dockerfile (100%)
--
2.21.0
[libvirt] [PATCH] build: Solve mingw build clash with DATADIR
by Eric Blake
Commit fed58d83 was a hack to fix a mingw build failure caused by
header inclusion order: a clash over the use of DATADIR. It repeated
a trick used several times in the past of tweaking inclusion order
until the clash goes away. Better is to revert that, and instead use
pragmas to avoid the clash in the first place, regardless of header
ordering, solving it for everyone in the future.
Signed-off-by: Eric Blake <eblake@redhat.com>
---
I tested that both gcc and clang on F29 support this; but it will take
a full CI run to see if everywhere else is okay with it. Thus, it is
not 5.6 material.
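For reference, a minimal standalone sketch of the pragma trick (not
from the patch; push_macro/pop_macro are supported by GCC, Clang, and
MSVC):

    /* configmake.h defines DATADIR as an installation path */
    #define DATADIR "/usr/share"
    #pragma push_macro("DATADIR")  /* save our definition */
    #undef DATADIR                 /* let Windows headers use the name */
    /* #include <winsock2.h> and friends would go here */
    #pragma pop_macro("DATADIR")   /* DATADIR is "/usr/share" again */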
src/util/viratomic.h | 3 +++
src/conf/checkpoint_conf.c | 2 --
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/util/viratomic.h b/src/util/viratomic.h
index 35800dafcd..c6e7668324 100644
--- a/src/util/viratomic.h
+++ b/src/util/viratomic.h
@@ -218,7 +218,10 @@ VIR_STATIC unsigned int virAtomicIntXor(volatile unsigned int *atomic,
# ifdef VIR_ATOMIC_OPS_WIN32
+# pragma push_macro("DATADIR") /* If "configmake.h" was included first */
+# undef DATADIR
# include <winsock2.h>
+# pragma pop_macro("DATADIR")
# include <windows.h>
# include <intrin.h>
# if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64)
diff --git a/src/conf/checkpoint_conf.c b/src/conf/checkpoint_conf.c
index 5ce4cc4853..5f4c275dd8 100644
--- a/src/conf/checkpoint_conf.c
+++ b/src/conf/checkpoint_conf.c
@@ -21,8 +21,6 @@
#include <config.h>
-#include <unistd.h>
-
#include "configmake.h"
#include "internal.h"
#include "virbitmap.h"
--
2.20.1
[libvirt] [PATCH 0/7] qemu: use domCaps for validation
by Cole Robinson
I'm trying to remove some hurdles and pitfalls WRT extending
domaincapabilities data. One issue is that it's too easy to add
invalid data to it, or let the data become out of date.
For example the first two patches of this series add <rng model=X>
domcaps reporting. The logic to fill in the domcaps data from qemuCaps
is nearly identical to the logic we use to validate rng->model in
qemuDomainRNGDefValidate. If just those patches are added, and later
a new qemu rng model was introduced, a future patch could easily
miss updated domaincapabilities output.
This series aims to set up a pattern to prevent these types of issues
from sneaking in. A function virDomainCapsDeviceDefValidate is added
which will use domcaps data to perform validation against a devicedef.
The existing qemu <rng> model validation is moved there. This ensures
that any future <rng> model additions, if they want to work in the
qemu driver, effectively need to extend domaincapabilities as well.
It's also theoretically useful for other drivers too.
One issue is that at DomainDefValidate time we don't have domCaps
handy, or any cache layer for assembling domCaps. Patch #4 adds
a domCapsCache hashtable to the virQEMUCaps class for caching domCaps
builds based on the full tuple of emulator+machine+arch+virttype.
If qemuCaps needs to be regenerated, the domCaps cache is wiped out
for us, so we don't need to worry about the data being stale; it's
tied to the lifetime of a qemuCaps instance.
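As a rough sketch of the caching idea (the names below are
illustrative, not the actual functions added by the series):

    /* Key the cache on the full tuple so distinct configurations get
     * distinct domCaps; the hash table lives in the qemuCaps object,
     * so the cache is dropped whenever qemuCaps is. */
    char *key = NULL;
    virDomainCapsPtr domCaps = NULL;

    if (virAsprintf(&key, "%s-%s-%s-%s", emulator, machine,
                    virArchToString(arch),
                    virDomainVirtTypeToString(virttype)) < 0)
        return NULL;
    domCaps = virHashLookup(qemuCaps->domCapsCache, key);
    if (!domCaps) {
        domCaps = virDomainCapsNew(emulator, machine, arch, virttype);
        /* ... fill in from qemuCaps, then cache ... */
        ignore_value(virHashAddEntry(qemuCaps->domCapsCache, key, domCaps));
    }
    VIR_FREE(key);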
Cole Robinson (7):
conf: domcaps: Report device <rng>
qemu: capabilities: fill in domcaps <rng>
qemu: conf: add virQEMUDriverGetDomainCapabilities
qemu: conf: Cache domCaps in qemuCaps
conf: domcaps: Add virDomainCapsDeviceDefValidate
qemu: domain: Call virDomainCapsDeviceDefValidate
qemu: Move rng model validation to domcaps
docs/formatdomaincaps.html.in | 35 ++++++++
docs/schemas/domaincaps.rng | 10 +++
src/conf/domain_capabilities.c | 83 ++++++++++++++++++
src/conf/domain_capabilities.h | 14 ++++
src/libvirt_private.syms | 1 +
src/qemu/qemu_capabilities.c | 41 +++++++++
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_conf.c | 84 +++++++++++++++++++
src/qemu/qemu_conf.h | 7 ++
src/qemu/qemu_domain.c | 38 +++------
src/qemu/qemu_driver.c | 18 +---
.../qemu_1.7.0.x86_64.xml | 9 ++
.../qemu_2.12.0-virt.aarch64.xml | 11 +++
.../qemu_2.12.0.ppc64.xml | 11 +++
.../qemu_2.12.0.s390x.xml | 11 +++
.../qemu_2.12.0.x86_64.xml | 11 +++
.../qemu_2.6.0-virt.aarch64.xml | 11 +++
.../qemu_2.6.0.aarch64.xml | 11 +++
.../domaincapsschemadata/qemu_2.6.0.ppc64.xml | 11 +++
.../qemu_2.6.0.x86_64.xml | 11 +++
.../domaincapsschemadata/qemu_2.7.0.s390x.xml | 11 +++
.../qemu_2.8.0-tcg.x86_64.xml | 11 +++
.../domaincapsschemadata/qemu_2.8.0.s390x.xml | 11 +++
.../qemu_2.8.0.x86_64.xml | 11 +++
.../qemu_2.9.0-q35.x86_64.xml | 11 +++
.../qemu_2.9.0-tcg.x86_64.xml | 11 +++
.../qemu_2.9.0.x86_64.xml | 11 +++
.../domaincapsschemadata/qemu_3.0.0.s390x.xml | 11 +++
.../qemu_4.0.0.x86_64.xml | 11 +++
29 files changed, 488 insertions(+), 40 deletions(-)
--
2.21.0
[libvirt] [PATCH] daemon: improve Xen support in systemd service
by Jim Fehlig
The xencommons service provides all the essential services such as
xenstored, xenconsoled, etc. needed by the libvirt Xen driver, so
libvirtd should be started after xencommons.
The xendomains service uses Xen's xl tool to operate on any domains it
finds running, even those managed by libvirt. Add a Conflicts= on the
xendomains service to ensure it is not active at the same time as
libvirtd.
Signed-off-by: Jim Fehlig <jfehlig@suse.com>
---
src/remote/libvirtd.service.in | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/remote/libvirtd.service.in b/src/remote/libvirtd.service.in
index 3ddf0e229b..5dbe7cbe72 100644
--- a/src/remote/libvirtd.service.in
+++ b/src/remote/libvirtd.service.in
@@ -15,6 +15,8 @@ After=local-fs.target
After=remote-fs.target
After=systemd-logind.service
After=systemd-machined.service
+After=xencommons.service
+Conflicts=xendomains.service
Documentation=man:libvirtd(8)
Documentation=https://libvirt.org
--
2.22.0
[libvirt] [PATCH 0/4] test_driver: implement FS-related APIs
by Ilias Stamatis
Ilias Stamatis (4):
test_driver: introduce domain-private data
test_driver: implement virDomainFSFreeze
test_driver: implement virDomainFSThaw
test_driver: implement virDomainFSTrim
src/test/test_driver.c | 191 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 190 insertions(+), 1 deletion(-)
--
2.22.0
[libvirt] [PATCH v1 00/31] Introduce NVMe support
by Michal Privoznik
These patches introduce support for NVMe disks in libvirt. Note that
even without them it is possible to use NVMe disks for your domains in
two ways:
1) <hostdev/> - This is regular PCI assignment with all the drawbacks
(no migration, no snapshots, ...)
2) <disk/> - Since NVMe disks are accessible via /dev/nvme* they can be
assigned to domains. The problem is that because qemu accesses
/dev/nvme*, the host kernel's storage stack is involved, which adds
significant latency [1].
The solution to this problem is to combine 1) and 2):
- Bypass host kernel's storage stack by detaching the NVMe disk from the
host (and attaching it to the VFIO driver), and
- Plug the NVMe disk into qemu's block layer so that all fancy features
can be supported.
On the qemu command line this is done via:
-drive file.driver=nvme,file.device=0000:01:00.0,file.namespace=1,format=raw,\
if=none,id=drive-virtio-disk0 \
-device virtio-blk-pci,scsi=off,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,\
id=virtio-disk0,bootindex=1 \
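In domain XML this would map to something like the following (a sketch
based on the schema and conf patches below; the device and namespace
match the command line above, but treat the exact attribute names as
illustrative):

    <disk type='nvme' device='disk'>
      <driver name='qemu' type='raw'/>
      <source type='pci' managed='yes' namespace='1'>
        <address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
      </source>
      <target dev='vda' bus='virtio'/>
    </disk>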
You can also find my patches on my github [2].
1: https://www.linux-kvm.org/images/4/4c/Userspace_NVMe_driver_in_QEMU_-_Fam...
2: https://github.com/zippy2/libvirt/commits/nvme
Michal Prívozník (31):
virHostdevPreparePCIDevices: Separate out function body
virHostdevReAttachPCIDevices: Separate out function body
virpcimock: Move actions checking one level up
Revert "virpcitest: Test virPCIDeviceDetach failure"
virpcimock: Create driver_override file in device dirs
virPCIDeviceAddressEqual: Fix const correctness
virPCIDeviceAddressAsString: Fix const correctness
virpci: Introduce virPCIDeviceAddressCopy
qemuDomainDeviceDefValidateDisk: Reorder some checks
schemas: Introduce disk type NVMe
conf: Format and parse NVMe type disk
util: Introduce virNVMeDevice module
virhostdev: Include virNVMeDevice module
virhostdevtest: Don't proceed to test cases if init failed
virhostdevtest: s/CHECK_LIST_COUNT/CHECK_PCI_LIST_COUNT/
virpcimock: Introduce NVMe driver and devices
virhostdevtest: Test virNVMeDevice assignment
qemu: prepare NVMe devices too
qemu: Take NVMe disks into account when calculating memlock limit
virstoragefile: Introduce virStorageSourceChainHasNVMe
domain_conf: Introduce virDomainDefHasNVMeDisk
qemu_domain: Separate VFIO code
qemu_domain: Introduce NVMe path getting helpers
qemu: Create NVMe disk in domain namespace
qemu: Allow NVMe disk in CGroups
security_selinux: Simplify virSecuritySELinuxSetImageLabelInternal
virSecuritySELinuxRestoreImageLabelInt: Don't skip non-local storage
qemu_capabilities: Introduce QEMU_CAPS_DRIVE_NVME
qemu: Generate command line of NVMe disks
qemu: Don't leak storage perms on failure in
qemuDomainAttachDiskGeneric
qemu_hotplug: Prepare NVMe disks on hotplug
docs/formatdomain.html.in | 45 +-
docs/schemas/domaincommon.rng | 32 ++
src/conf/domain_conf.c | 160 +++++++
src/conf/domain_conf.h | 6 +
src/libvirt_private.syms | 26 ++
src/qemu/qemu_block.c | 24 +
src/qemu/qemu_capabilities.c | 4 +
src/qemu/qemu_capabilities.h | 3 +
src/qemu/qemu_cgroup.c | 59 ++-
src/qemu/qemu_command.c | 4 +
src/qemu/qemu_domain.c | 115 ++++-
src/qemu/qemu_domain.h | 6 +
src/qemu/qemu_driver.c | 4 +
src/qemu/qemu_hostdev.c | 49 ++-
src/qemu/qemu_hostdev.h | 10 +
src/qemu/qemu_hotplug.c | 76 +++-
src/qemu/qemu_migration.c | 1 +
src/qemu/qemu_process.c | 7 +
src/security/security_dac.c | 38 ++
src/security/security_selinux.c | 95 ++--
src/util/Makefile.inc.am | 2 +
src/util/virhostdev.c | 350 +++++++++++++--
src/util/virhostdev.h | 25 ++
src/util/virnvme.c | 412 ++++++++++++++++++
src/util/virnvme.h | 89 ++++
src/util/virpci.c | 12 +-
src/util/virpci.h | 8 +-
src/util/virstoragefile.c | 73 ++++
src/util/virstoragefile.h | 17 +
src/xenconfig/xen_xl.c | 1 +
.../caps_2.12.0.aarch64.xml | 1 +
.../caps_2.12.0.ppc64.xml | 1 +
.../caps_2.12.0.s390x.xml | 1 +
.../caps_2.12.0.x86_64.xml | 1 +
.../qemucapabilitiesdata/caps_3.0.0.ppc64.xml | 1 +
.../caps_3.0.0.riscv32.xml | 1 +
.../caps_3.0.0.riscv64.xml | 1 +
.../qemucapabilitiesdata/caps_3.0.0.s390x.xml | 1 +
.../caps_3.0.0.x86_64.xml | 1 +
.../qemucapabilitiesdata/caps_3.1.0.ppc64.xml | 1 +
.../caps_3.1.0.x86_64.xml | 1 +
.../caps_4.0.0.aarch64.xml | 1 +
.../qemucapabilitiesdata/caps_4.0.0.ppc64.xml | 1 +
.../caps_4.0.0.riscv32.xml | 1 +
.../caps_4.0.0.riscv64.xml | 1 +
.../qemucapabilitiesdata/caps_4.0.0.s390x.xml | 1 +
.../caps_4.0.0.x86_64.xml | 1 +
.../caps_4.1.0.x86_64.xml | 1 +
.../disk-nvme.x86_64-latest.args | 52 +++
tests/qemuxml2argvdata/disk-nvme.xml | 63 +++
tests/qemuxml2argvtest.c | 1 +
tests/qemuxml2xmloutdata/disk-nvme.xml | 1 +
tests/qemuxml2xmltest.c | 1 +
tests/virhostdevtest.c | 185 ++++++--
tests/virpcimock.c | 76 +++-
tests/virpcitest.c | 32 --
tests/virpcitestdata/0000-01-00.0.config | Bin 0 -> 4096 bytes
tests/virpcitestdata/0000-02-00.0.config | Bin 0 -> 4096 bytes
58 files changed, 1978 insertions(+), 204 deletions(-)
create mode 100644 src/util/virnvme.c
create mode 100644 src/util/virnvme.h
create mode 100644 tests/qemuxml2argvdata/disk-nvme.x86_64-latest.args
create mode 100644 tests/qemuxml2argvdata/disk-nvme.xml
create mode 120000 tests/qemuxml2xmloutdata/disk-nvme.xml
create mode 100644 tests/virpcitestdata/0000-01-00.0.config
create mode 100644 tests/virpcitestdata/0000-02-00.0.config
--
2.21.0
[libvirt] [PATCH 1/1] tests/virsh-checkpoint/snapshot: change 'sed' output filtering
by Daniel Henrique Barboza
There is a chance that the current sed filtering used in
these new tests might fail on some machines when the
'virsh #' prompt is repeated on a line, or shares a line
with valid output that shouldn't be filtered.
This is the output of the virsh-snapshot test on my T480 dev box:
./virsh-snapshot
--- exp 2019-07-31 18:42:31.107399428 -0300
+++ out.cooked 2019-07-31 18:42:31.108399437 -0300
@@ -1,8 +1,3 @@
-
-
-Domain snapshot s3 created from 's3.xml'
-Domain snapshot s2 created from 's2.xml'
-Name: s2
Domain: test
Current: yes
State: running
There are 3 valid lines missing. This is the unfiltered output:
=== out ===
Welcome to lt-virsh, the virtualization interactive terminal.
Type: 'help' for help with commands
'quit' to quit
virsh # virsh #
virsh #
virsh # virsh # Domain snapshot s3 created from 's3.xml'
virsh # Domain snapshot s2 created from 's2.xml'
virsh # Name: s2
Domain: test
Current: yes
State: running
Location: internal
Parent: s3
Children: 0
Descendants: 0
Metadata: yes
virsh #
============
We can see that the 3 missing lines share a line with the
'virsh #' prompt, so the filtering erases them.
A similar situation happens with virsh-checkpoint as well.
This patch makes the 'sed' filtering cruder and less elegant
than the current version, but more reliable against outputs
that vary between dev machines. We're also removing the
blank lines from the expected output to make it less
error-prone.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
---
Eric, feel free to accept this patch, tweak it, discard it and
try something else or whatever. I was going to send the
commit msg as a reply to your query in the ML, then realized
that I might as well propose a fix for it.
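For illustration, the new expression strips the prompt wherever it
appears instead of dropping whole lines (GNU sed assumed; the sample
input below is made up):

    $ printf 'banner\nvirsh # \nvirsh # Name: s2\nDomain: test\n\n' | \
        sed '1,/^virsh #/d; s/virsh #\s//g; /^$/d'
    Name: s2
    Domain: test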
tests/virsh-checkpoint | 6 +-----
tests/virsh-snapshot | 6 ++----
2 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/tests/virsh-checkpoint b/tests/virsh-checkpoint
index 75bdc293be..a3cad74f74 100755
--- a/tests/virsh-checkpoint
+++ b/tests/virsh-checkpoint
@@ -152,20 +152,16 @@ $abs_top_builddir/tools/virsh -c test:///default >out 2>err <<EOF || fail=1
EOF
cat <<\EOF > exp || fail=1
-
-
Domain checkpoint c3 created from 'c3.xml'
Domain checkpoint c2 created from 'c2.xml'
c2
-
Name: c2
Domain: test
Parent: c3
Children: 0
Descendants: 0
-
EOF
-sed '1,/^virsh #/d; /virsh #/d' < out > out.cooked || fail=1
+sed '1,/^virsh #/d; s/virsh #\s//g; /^$/d' < out > out.cooked || fail=1
compare exp out.cooked || fail=1
cat <<EOF > exp || fail=1
diff --git a/tests/virsh-snapshot b/tests/virsh-snapshot
index 20ff966a51..874093ea3c 100755
--- a/tests/virsh-snapshot
+++ b/tests/virsh-snapshot
@@ -201,9 +201,8 @@ $abs_top_builddir/tools/virsh -c test:///default >out 2>err <<EOF || fail=1
snapshot-info test --current
EOF
-cat <<\EOF > exp || fail=1
-
+cat <<\EOF > exp || fail=1
Domain snapshot s3 created from 's3.xml'
Domain snapshot s2 created from 's2.xml'
Name: s2
@@ -215,9 +214,8 @@ Parent: s3
Children: 0
Descendants: 0
Metadata: yes
-
EOF
-sed '1,/^virsh #/d; /virsh #/d' < out > out.cooked || fail=1
+sed '1,/^virsh #/d; s/virsh #\s//g; /^$/d' < out > out.cooked || fail=1
compare exp out.cooked || fail=1
cat <<EOF > exp || fail=1
--
2.21.0