[libvirt] [PATCH 0/2] qemu: kill junk process
by Martin Kletzander
First patch tries to kill qemu possibly left running by previous
daemon and second patch is just a reproducer for the issue; if you
start daemon with that patch applied, it will kill itself in the
appropriate time so you don't have to quickly start/kill the daemon
over and over again.
Martin Kletzander (2):
qemu: make sure capability probing process can start
DO NOT APPLY: Reproducer for patch 1/2
src/qemu/qemu_capabilities.c | 45 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 45 insertions(+)
--
2.1.2
10 years, 1 month
[libvirt] [RFC PATCH] iface-unbridge: Output in-use domain list instead of forcibly removing an in-use bridge
by Lin Ma
virsh iface-unbridge directly removes the bridge even though there are guest
interfaces attached to it.
This patch outputs in-use domain list instead of removing bridge in this case.
I know that generally the virsh-*.[ch] files are self-contained, but in order to iterate over all domains this patch breaks that rule,
so I am sending it as an RFC to see whether the patch makes sense or if there is a better way.
Signed-off-by: Lin Ma <lma(a)suse.com>
---
tools/virsh-domain-monitor.c | 275 -------------------------------------------
tools/virsh-domain.c | 268 +++++++++++++++++++++++++++++++++++++++++
tools/virsh-domain.h | 13 ++
tools/virsh-interface.c | 99 ++++++++++++++++
4 files changed, 380 insertions(+), 275 deletions(-)
diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c
index 2af0d4f..b3d1b31 100644
--- a/tools/virsh-domain-monitor.c
+++ b/tools/virsh-domain-monitor.c
@@ -1478,281 +1478,6 @@ static const vshCmdInfo info_list[] = {
{.name = NULL}
};
-/* compare domains, pack NULLed ones at the end*/
-static int
-vshDomainSorter(const void *a, const void *b)
-{
- virDomainPtr *da = (virDomainPtr *) a;
- virDomainPtr *db = (virDomainPtr *) b;
- unsigned int ida;
- unsigned int idb;
- unsigned int inactive = (unsigned int) -1;
-
- if (*da && !*db)
- return -1;
-
- if (!*da)
- return *db != NULL;
-
- ida = virDomainGetID(*da);
- idb = virDomainGetID(*db);
-
- if (ida == inactive && idb == inactive)
- return vshStrcasecmp(virDomainGetName(*da), virDomainGetName(*db));
-
- if (ida != inactive && idb != inactive) {
- if (ida > idb)
- return 1;
- else if (ida < idb)
- return -1;
- }
-
- if (ida != inactive)
- return -1;
- else
- return 1;
-}
-
-struct vshDomainList {
- virDomainPtr *domains;
- size_t ndomains;
-};
-typedef struct vshDomainList *vshDomainListPtr;
-
-static void
-vshDomainListFree(vshDomainListPtr domlist)
-{
- size_t i;
-
- if (domlist && domlist->domains) {
- for (i = 0; i < domlist->ndomains; i++) {
- if (domlist->domains[i])
- virDomainFree(domlist->domains[i]);
- }
- VIR_FREE(domlist->domains);
- }
- VIR_FREE(domlist);
-}
-
-static vshDomainListPtr
-vshDomainListCollect(vshControl *ctl, unsigned int flags)
-{
- vshDomainListPtr list = vshMalloc(ctl, sizeof(*list));
- size_t i;
- int ret;
- int *ids = NULL;
- int nids = 0;
- char **names = NULL;
- int nnames = 0;
- virDomainPtr dom;
- bool success = false;
- size_t deleted = 0;
- int persistent;
- int autostart;
- int state;
- int nsnap;
- int mansave;
-
- /* try the list with flags support (0.9.13 and later) */
- if ((ret = virConnectListAllDomains(ctl->conn, &list->domains,
- flags)) >= 0) {
- list->ndomains = ret;
- goto finished;
- }
-
- /* check if the command is actually supported */
- if (last_error && last_error->code == VIR_ERR_NO_SUPPORT) {
- vshResetLibvirtError();
- goto fallback;
- }
-
- if (last_error && last_error->code == VIR_ERR_INVALID_ARG) {
- /* try the new API again but mask non-guaranteed flags */
- unsigned int newflags = flags & (VIR_CONNECT_LIST_DOMAINS_ACTIVE |
- VIR_CONNECT_LIST_DOMAINS_INACTIVE);
-
- vshResetLibvirtError();
- if ((ret = virConnectListAllDomains(ctl->conn, &list->domains,
- newflags)) >= 0) {
- list->ndomains = ret;
- goto filter;
- }
- }
-
- /* there was an error during the first or second call */
- vshError(ctl, "%s", _("Failed to list domains"));
- goto cleanup;
-
-
- fallback:
- /* fall back to old method (0.9.12 and older) */
- vshResetLibvirtError();
-
- /* list active domains, if necessary */
- if (!VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE) ||
- VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_ACTIVE)) {
- if ((nids = virConnectNumOfDomains(ctl->conn)) < 0) {
- vshError(ctl, "%s", _("Failed to list active domains"));
- goto cleanup;
- }
-
- if (nids) {
- ids = vshMalloc(ctl, sizeof(int) * nids);
-
- if ((nids = virConnectListDomains(ctl->conn, ids, nids)) < 0) {
- vshError(ctl, "%s", _("Failed to list active domains"));
- goto cleanup;
- }
- }
- }
-
- if (!VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE) ||
- VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_INACTIVE)) {
- if ((nnames = virConnectNumOfDefinedDomains(ctl->conn)) < 0) {
- vshError(ctl, "%s", _("Failed to list inactive domains"));
- goto cleanup;
- }
-
- if (nnames) {
- names = vshMalloc(ctl, sizeof(char *) * nnames);
-
- if ((nnames = virConnectListDefinedDomains(ctl->conn, names,
- nnames)) < 0) {
- vshError(ctl, "%s", _("Failed to list inactive domains"));
- goto cleanup;
- }
- }
- }
-
- list->domains = vshMalloc(ctl, sizeof(virDomainPtr) * (nids + nnames));
- list->ndomains = 0;
-
- /* get active domains */
- for (i = 0; i < nids; i++) {
- if (!(dom = virDomainLookupByID(ctl->conn, ids[i])))
- continue;
- list->domains[list->ndomains++] = dom;
- }
-
- /* get inactive domains */
- for (i = 0; i < nnames; i++) {
- if (!(dom = virDomainLookupByName(ctl->conn, names[i])))
- continue;
- list->domains[list->ndomains++] = dom;
- }
-
- /* truncate domains that weren't found */
- deleted = (nids + nnames) - list->ndomains;
-
- filter:
- /* filter list the list if the list was acquired by fallback means */
- for (i = 0; i < list->ndomains; i++) {
- dom = list->domains[i];
-
- /* persistence filter */
- if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_PERSISTENT)) {
- if ((persistent = virDomainIsPersistent(dom)) < 0) {
- vshError(ctl, "%s", _("Failed to get domain persistence info"));
- goto cleanup;
- }
-
- if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_PERSISTENT) && persistent) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_TRANSIENT) && !persistent)))
- goto remove_entry;
- }
-
- /* domain state filter */
- if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_STATE)) {
- if (virDomainGetState(dom, &state, NULL, 0) < 0) {
- vshError(ctl, "%s", _("Failed to get domain state"));
- goto cleanup;
- }
-
- if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_RUNNING) &&
- state == VIR_DOMAIN_RUNNING) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_PAUSED) &&
- state == VIR_DOMAIN_PAUSED) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_SHUTOFF) &&
- state == VIR_DOMAIN_SHUTOFF) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_OTHER) &&
- (state != VIR_DOMAIN_RUNNING &&
- state != VIR_DOMAIN_PAUSED &&
- state != VIR_DOMAIN_SHUTOFF))))
- goto remove_entry;
- }
-
- /* autostart filter */
- if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_AUTOSTART)) {
- if (virDomainGetAutostart(dom, &autostart) < 0) {
- vshError(ctl, "%s", _("Failed to get domain autostart state"));
- goto cleanup;
- }
-
- if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_AUTOSTART) && autostart) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_NO_AUTOSTART) && !autostart)))
- goto remove_entry;
- }
-
- /* managed save filter */
- if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_MANAGEDSAVE)) {
- if ((mansave = virDomainHasManagedSaveImage(dom, 0)) < 0) {
- vshError(ctl, "%s",
- _("Failed to check for managed save image"));
- goto cleanup;
- }
-
- if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_MANAGEDSAVE) && mansave) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_NO_MANAGEDSAVE) && !mansave)))
- goto remove_entry;
- }
-
- /* snapshot filter */
- if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_SNAPSHOT)) {
- if ((nsnap = virDomainSnapshotNum(dom, 0)) < 0) {
- vshError(ctl, "%s", _("Failed to get snapshot count"));
- goto cleanup;
- }
- if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_HAS_SNAPSHOT) && nsnap > 0) ||
- (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_NO_SNAPSHOT) && nsnap == 0)))
- goto remove_entry;
- }
-
- /* the domain matched all filters, it may stay */
- continue;
-
- remove_entry:
- /* the domain has to be removed as it failed one of the filters */
- virDomainFree(list->domains[i]);
- list->domains[i] = NULL;
- deleted++;
- }
-
- finished:
- /* sort the list */
- if (list->domains && list->ndomains)
- qsort(list->domains, list->ndomains, sizeof(*list->domains),
- vshDomainSorter);
-
- /* truncate the list if filter simulation deleted entries */
- if (deleted)
- VIR_SHRINK_N(list->domains, list->ndomains, deleted);
-
- success = true;
-
- cleanup:
- for (i = 0; nnames != -1 && i < nnames; i++)
- VIR_FREE(names[i]);
-
- if (!success) {
- vshDomainListFree(list);
- list = NULL;
- }
-
- VIR_FREE(names);
- VIR_FREE(ids);
- return list;
-}
-
static const vshCmdOptDef opts_list[] = {
{.name = "inactive",
.type = VSH_OT_BOOL,
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 12550ff..b83e670 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -60,6 +60,274 @@
# define SA_SIGINFO 0
#endif
+/* compare domains, pack NULLed ones at the end*/
+int
+vshDomainSorter(const void *a, const void *b)
+{
+ virDomainPtr *da = (virDomainPtr *) a;
+ virDomainPtr *db = (virDomainPtr *) b;
+ unsigned int ida;
+ unsigned int idb;
+ unsigned int inactive = (unsigned int) -1;
+
+ if (*da && !*db)
+ return -1;
+
+ if (!*da)
+ return *db != NULL;
+
+ ida = virDomainGetID(*da);
+ idb = virDomainGetID(*db);
+
+ if (ida == inactive && idb == inactive)
+ return vshStrcasecmp(virDomainGetName(*da), virDomainGetName(*db));
+
+ if (ida != inactive && idb != inactive) {
+ if (ida > idb)
+ return 1;
+ else if (ida < idb)
+ return -1;
+ }
+
+ if (ida != inactive)
+ return -1;
+ else
+ return 1;
+}
+
+void
+vshDomainListFree(vshDomainListPtr domlist)
+{
+ size_t i;
+
+ if (domlist && domlist->domains) {
+ for (i = 0; i < domlist->ndomains; i++) {
+ if (domlist->domains[i])
+ virDomainFree(domlist->domains[i]);
+ }
+ VIR_FREE(domlist->domains);
+ }
+ VIR_FREE(domlist);
+}
+
+vshDomainListPtr
+vshDomainListCollect(vshControl *ctl, unsigned int flags)
+{
+ vshDomainListPtr list = vshMalloc(ctl, sizeof(*list));
+ size_t i;
+ int ret;
+ int *ids = NULL;
+ int nids = 0;
+ char **names = NULL;
+ int nnames = 0;
+ virDomainPtr dom;
+ bool success = false;
+ size_t deleted = 0;
+ int persistent;
+ int autostart;
+ int state;
+ int nsnap;
+ int mansave;
+
+ /* try the list with flags support (0.9.13 and later) */
+ if ((ret = virConnectListAllDomains(ctl->conn, &list->domains,
+ flags)) >= 0) {
+ list->ndomains = ret;
+ goto finished;
+ }
+
+ /* check if the command is actually supported */
+ if (last_error && last_error->code == VIR_ERR_NO_SUPPORT) {
+ vshResetLibvirtError();
+ goto fallback;
+ }
+
+ if (last_error && last_error->code == VIR_ERR_INVALID_ARG) {
+ /* try the new API again but mask non-guaranteed flags */
+ unsigned int newflags = flags & (VIR_CONNECT_LIST_DOMAINS_ACTIVE |
+ VIR_CONNECT_LIST_DOMAINS_INACTIVE);
+
+ vshResetLibvirtError();
+ if ((ret = virConnectListAllDomains(ctl->conn, &list->domains,
+ newflags)) >= 0) {
+ list->ndomains = ret;
+ goto filter;
+ }
+ }
+
+ /* there was an error during the first or second call */
+ vshError(ctl, "%s", _("Failed to list domains"));
+ goto cleanup;
+
+
+ fallback:
+ /* fall back to old method (0.9.12 and older) */
+ vshResetLibvirtError();
+
+ /* list active domains, if necessary */
+ if (!VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE) ||
+ VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_ACTIVE)) {
+ if ((nids = virConnectNumOfDomains(ctl->conn)) < 0) {
+ vshError(ctl, "%s", _("Failed to list active domains"));
+ goto cleanup;
+ }
+
+ if (nids) {
+ ids = vshMalloc(ctl, sizeof(int) * nids);
+
+ if ((nids = virConnectListDomains(ctl->conn, ids, nids)) < 0) {
+ vshError(ctl, "%s", _("Failed to list active domains"));
+ goto cleanup;
+ }
+ }
+ }
+
+ if (!VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_ACTIVE) ||
+ VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_INACTIVE)) {
+ if ((nnames = virConnectNumOfDefinedDomains(ctl->conn)) < 0) {
+ vshError(ctl, "%s", _("Failed to list inactive domains"));
+ goto cleanup;
+ }
+
+ if (nnames) {
+ names = vshMalloc(ctl, sizeof(char *) * nnames);
+
+ if ((nnames = virConnectListDefinedDomains(ctl->conn, names,
+ nnames)) < 0) {
+ vshError(ctl, "%s", _("Failed to list inactive domains"));
+ goto cleanup;
+ }
+ }
+ }
+
+ list->domains = vshMalloc(ctl, sizeof(virDomainPtr) * (nids + nnames));
+ list->ndomains = 0;
+
+ /* get active domains */
+ for (i = 0; i < nids; i++) {
+ if (!(dom = virDomainLookupByID(ctl->conn, ids[i])))
+ continue;
+ list->domains[list->ndomains++] = dom;
+ }
+
+ /* get inactive domains */
+ for (i = 0; i < nnames; i++) {
+ if (!(dom = virDomainLookupByName(ctl->conn, names[i])))
+ continue;
+ list->domains[list->ndomains++] = dom;
+ }
+
+ /* truncate domains that weren't found */
+ deleted = (nids + nnames) - list->ndomains;
+
+ filter:
+ /* filter list the list if the list was acquired by fallback means */
+ for (i = 0; i < list->ndomains; i++) {
+ dom = list->domains[i];
+
+ /* persistence filter */
+ if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_PERSISTENT)) {
+ if ((persistent = virDomainIsPersistent(dom)) < 0) {
+ vshError(ctl, "%s", _("Failed to get domain persistence info"));
+ goto cleanup;
+ }
+
+ if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_PERSISTENT) && persistent) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_TRANSIENT) && !persistent)))
+ goto remove_entry;
+ }
+
+ /* domain state filter */
+ if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_STATE)) {
+ if (virDomainGetState(dom, &state, NULL, 0) < 0) {
+ vshError(ctl, "%s", _("Failed to get domain state"));
+ goto cleanup;
+ }
+
+ if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_RUNNING) &&
+ state == VIR_DOMAIN_RUNNING) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_PAUSED) &&
+ state == VIR_DOMAIN_PAUSED) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_SHUTOFF) &&
+ state == VIR_DOMAIN_SHUTOFF) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_OTHER) &&
+ (state != VIR_DOMAIN_RUNNING &&
+ state != VIR_DOMAIN_PAUSED &&
+ state != VIR_DOMAIN_SHUTOFF))))
+ goto remove_entry;
+ }
+
+ /* autostart filter */
+ if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_AUTOSTART)) {
+ if (virDomainGetAutostart(dom, &autostart) < 0) {
+ vshError(ctl, "%s", _("Failed to get domain autostart state"));
+ goto cleanup;
+ }
+
+ if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_AUTOSTART) && autostart) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_NO_AUTOSTART) && !autostart)))
+ goto remove_entry;
+ }
+
+ /* managed save filter */
+ if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_MANAGEDSAVE)) {
+ if ((mansave = virDomainHasManagedSaveImage(dom, 0)) < 0) {
+ vshError(ctl, "%s",
+ _("Failed to check for managed save image"));
+ goto cleanup;
+ }
+
+ if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_MANAGEDSAVE) && mansave) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_NO_MANAGEDSAVE) && !mansave)))
+ goto remove_entry;
+ }
+
+ /* snapshot filter */
+ if (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_FILTERS_SNAPSHOT)) {
+ if ((nsnap = virDomainSnapshotNum(dom, 0)) < 0) {
+ vshError(ctl, "%s", _("Failed to get snapshot count"));
+ goto cleanup;
+ }
+ if (!((VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_HAS_SNAPSHOT) && nsnap > 0) ||
+ (VSH_MATCH(VIR_CONNECT_LIST_DOMAINS_NO_SNAPSHOT) && nsnap == 0)))
+ goto remove_entry;
+ }
+
+ /* the domain matched all filters, it may stay */
+ continue;
+
+ remove_entry:
+ /* the domain has to be removed as it failed one of the filters */
+ virDomainFree(list->domains[i]);
+ list->domains[i] = NULL;
+ deleted++;
+ }
+
+ finished:
+ /* sort the list */
+ if (list->domains && list->ndomains)
+ qsort(list->domains, list->ndomains, sizeof(*list->domains),
+ vshDomainSorter);
+
+ /* truncate the list if filter simulation deleted entries */
+ if (deleted)
+ VIR_SHRINK_N(list->domains, list->ndomains, deleted);
+
+ success = true;
+
+ cleanup:
+ for (i = 0; nnames != -1 && i < nnames; i++)
+ VIR_FREE(names[i]);
+
+ if (!success) {
+ vshDomainListFree(list);
+ list = NULL;
+ }
+
+ VIR_FREE(names);
+ VIR_FREE(ids);
+ return list;
+}
static virDomainPtr
vshLookupDomainInternal(vshControl *ctl,
diff --git a/tools/virsh-domain.h b/tools/virsh-domain.h
index f46538f..2e0d11a 100644
--- a/tools/virsh-domain.h
+++ b/tools/virsh-domain.h
@@ -41,4 +41,17 @@ virDomainPtr vshCommandOptDomainBy(vshControl *ctl, const vshCmd *cmd,
extern const vshCmdDef domManagementCmds[];
+int vshDomainSorter(const void *a, const void *b);
+
+struct vshDomainList {
+ virDomainPtr *domains;
+ size_t ndomains;
+};
+
+typedef struct vshDomainList *vshDomainListPtr;
+
+void vshDomainListFree(vshDomainListPtr domlist);
+
+vshDomainListPtr vshDomainListCollect(vshControl *ctl, unsigned int flags);
+
#endif /* VIRSH_DOMAIN_H */
diff --git a/tools/virsh-interface.c b/tools/virsh-interface.c
index 6cacaf1..28b9e22 100644
--- a/tools/virsh-interface.c
+++ b/tools/virsh-interface.c
@@ -25,6 +25,7 @@
#include <config.h>
#include "virsh-interface.h"
+#include "virsh-domain.h"
#include <libxml/parser.h>
#include <libxml/tree.h>
@@ -40,6 +41,91 @@
#include "virxml.h"
#include "virstring.h"
+static bool
+vshBridgeInUse(vshControl *ctl, const char *br_name, char ***inuse_domnames)
+{
+ bool ret = false;
+ char **inuse_list = NULL;
+ vshDomainListPtr list = NULL;
+ virDomainPtr dom;
+ size_t i,j;
+ char *xml = NULL;
+ xmlDocPtr xmldoc = NULL;
+ xmlXPathContextPtr ctxt = NULL;
+ int ninterfaces;
+ int count = 0;
+ xmlNodePtr *interfaces = NULL;
+
+ if (!(list = vshDomainListCollect(ctl, VIR_CONNECT_LIST_DOMAINS_ACTIVE))) {
+ vshError(ctl, "%s", _("Failed to get active domains list"));
+ exit(EXIT_FAILURE);
+ }
+
+ if (list->ndomains <= 0)
+ goto cleanup;
+
+ if (VIR_ALLOC_N(inuse_list, list->ndomains) < 0) {
+ vshError(ctl, "%s", _("Failed to allocate inuse_list"));
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < list->ndomains; i++) {
+ dom = list->domains[i];
+ xml = virDomainGetXMLDesc(dom, 0);
+
+ if (!xml) {
+ vshError(ctl, "%s", _("Failed to get domain xml desc"));
+ exit(EXIT_FAILURE);
+ }
+
+ xmldoc = virXMLParseStringCtxt(xml, _("(domain_definition)"), &ctxt);
+
+ if (!xmldoc) {
+ vshError(ctl, "%s", _("Failed to parse string context"));
+ exit(EXIT_FAILURE);
+ }
+
+ ninterfaces = virXPathNodeSet("./devices/interface", ctxt, &interfaces);
+
+ if (ninterfaces < 0) {
+ vshError(ctl, "%s", _("Failed to count interfaces"));
+ exit(EXIT_FAILURE);
+ }
+
+ for (j = 0; j < ninterfaces; j++) {
+ char *source = NULL;
+ char *target = NULL;
+ ctxt->node = interfaces[j];
+ source = virXPathString("string(./source/@bridge"
+ "|./source/@dev"
+ "|./source/@network"
+ "|./source/@name)", ctxt);
+ target = virXPathString("string(./target/@dev)", ctxt);
+
+ if (target && source && br_name && STREQ(source, br_name)) {
+ inuse_list[count++] = vshStrdup(ctl, virDomainGetName(dom));
+ ret = true;
+ VIR_FREE(source);
+ VIR_FREE(target);
+ break;
+ }
+
+ VIR_FREE(source);
+ VIR_FREE(target);
+ }
+
+ VIR_FREE(interfaces);
+ VIR_FREE(xml);
+ xmlFreeDoc(xmldoc);
+ xmlXPathFreeContext(ctxt);
+ }
+ *inuse_domnames = inuse_list;
+
+ cleanup:
+ vshDomainListFree(list);
+ return ret;
+}
+
virInterfacePtr
vshCommandOptInterfaceBy(vshControl *ctl, const vshCmd *cmd,
const char *optname,
@@ -1043,6 +1129,9 @@ cmdInterfaceUnbridge(vshControl *ctl, const vshCmd *cmd)
xmlDocPtr xml_doc = NULL;
xmlXPathContextPtr ctxt = NULL;
xmlNodePtr top_node, if_node, cur;
+ int i;
+ size_t inuse_count;
+ char **inuse_list = NULL;
/* Get a handle to the original device */
if (!(br_handle = vshCommandOptInterfaceBy(ctl, cmd, "bridge",
@@ -1084,6 +1173,16 @@ cmdInterfaceUnbridge(vshControl *ctl, const vshCmd *cmd)
}
VIR_FREE(if_name);
+ if (vshBridgeInUse(ctl, br_name, &inuse_list)) {
+ inuse_count = virStringListLength(inuse_list);
+ vshPrint(ctl, "The bridge %s is in use by other guests:", br_name);
+ for (i = 0; i < inuse_count; i++)
+ vshPrint(ctl, "%s %s", i != 0 ? "," : "", inuse_list[i]);
+ vshPrint(ctl, "\n");
+ virStringFreeList(inuse_list);
+ goto cleanup;
+ }
+
/* Find the <bridge> node under <interface>. */
if (virXPathNode("./bridge", ctxt) == NULL) {
vshError(ctl, "%s", _("No bridge node in xml document"));
--
1.8.4
10 years, 1 month
[libvirt] [PATCH v3 2/2] network: Add code for setting network bandwidth for ethernet interfaces
by Anirban Chakraborty
Signed-off-by: Anirban Chakraborty <abchak(a)juniper.net>
---
src/conf/domain_conf.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index f03599e..91da1ec 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -2852,7 +2852,8 @@ static inline bool virNetDevSupportBandwidth(int type)
{
return ((type == VIR_DOMAIN_NET_TYPE_BRIDGE ||
type == VIR_DOMAIN_NET_TYPE_NETWORK ||
- type == VIR_DOMAIN_NET_TYPE_DIRECT) ? true : false);
+ type == VIR_DOMAIN_NET_TYPE_DIRECT ||
+ type == VIR_DOMAIN_NET_TYPE_ETHERNET) ? true : false);
};
#endif /* __DOMAIN_CONF_H */
--
1.9.1
10 years, 1 month
[libvirt] [PowerPC Patch 0/4] Libvirt CPU enhancements for Power KVM
by Prerna Saxena
This patch series is a collection of enhancements for PowerPC CPUs on PowerKVM.
Series Summary:
==========
Patch 1/4 : Introduce a new architecture 'ppc64le' for libvirt.
Patch 2/4 : Add libvirt support for VMs running in 'compat' mode on Power KVM.
Patch 3/4 : Optimize PVR comparison for PowerPC CPUs.
Patch 4/4 : Correctly model available CPUs in response to PowerPC QEMU implementation.
Detail:
====
* PowerPC has traditionally been a Big-endian architecture. However, with PowerPC ISA version 2.07, it can run in Little-endian mode as well. IBM Power8 processors, compliant with ISA 2.07 allow
launching VMs in
little-endian mode. This is signified by 'ppc64le' architecture. Patch 1 adds this support to libvirt, to allow running VMs based on ppc64le architecture.
* Patch 2-4 tweak libvirt to correctly model PowerPC CPUs based on recent PowerKVM implementation.
PowerKVM permits VMs with vcpus in the following allowed modes :
i) Host native mode:
where the vcpu seen in the VM belongs to the same processor generation as the host.
Example: A power8 host, conforming to PowerISA version 2.07, will run VMs with "power8" vcpus.
ii) Binary Compatibility ("compat") mode:
PowerISA allows processors to run VMs in binary compatibility ("compat") mode supporting an older version of ISA.
As an example: In compatibility mode, a POWER8 host can run a "power7" VM conforming to PowerISA v2.06, while a POWER7 host can run a "power6" VM, conforming to power ISA v2.05.
QEMU has recently added support to explicitly denote a VM running in compatibility mode through commits 6d9412ea & 8dfa3a5e85. Henceforth, VMs of type (i) will be invoked with the QEMU invocation
"-cpu host", while VMs of type (ii) will be invoked using "-cpu host, compat=power6".
Now, an explicit cpu selection using "-cpu POWER6" is moot, and the recommended practice is to use the matching compat mode, if the requested cpu type differs from the host.
Patches 2-4 address various aspects of this change.
* Patch 2 : Adds support for generating the correct command line for QEMU. New xml semantics are introduced to signify this type.
* Patch 3 : PowerKVM vCPUs differ uniquely across generations ( such as power6, power7, power8). Each generation signifies a new PowerISA version that exhibits features unique to that generation. The
higher order 16 bits of PVR denote the processor generation and the lower order 16 bits denote the cpu chip (sub)version.
For all practical purposes of launching a VM, we care about the generation the vCPU will belong to, and not specifically the chip version. In fact, PowerKVM does not seek out specification of a unique
chip version(such as POWER7_v2.3) for running a vCPU. This patch updates the libvirt PVR check to reflect this relationship.
* Patch 4: As specified in QEMU commit 8dfa3a5e85, the supported modes for a "compat" CPU are power6/power7/power8. This patch amends cpu_map.xml to understand these modes. PPC64 CPU Test cases are
also updated accordingly.
This patch also includes a test case for compat mode, introduced in patch 2.
Changelog:
======
Patch 1/4 : This is v1.
Patch 2/4 : Rebased initial patch https://www.redhat.com/archives/libvir-list/2014-June/msg01338.html
Patch 3/4, 4/4 : This is v2, addressing response https://www.redhat.com/archives/libvir-list/2014-August/msg00859.html
--
Prerna Saxena
Linux Technology Centre,
IBM Systems and Technology Lab,
Bangalore, India
10 years, 1 month
[libvirt] [PATCHv2 0/3] Save domain status after changing some parameters
by Shanzhi Yu
Related bug: https://bugzilla.redhat.com/show_bug.cgi?id=1146511
Also add BeginJob/EndJob in qemuDomainSetBlkioParameters,
qemuDomainSetInterfaceParameters,qemuDomainSetNumaParameters
Shanzhi Yu (3):
qemu: save domain status after set the blkio parameters
qemu: call qemuDomainObjBeginJob/qemuDomainObjEndJob in
qemuDomainSetInterfaceParameters
qemu: save domain status after set domain's numa parameters
src/qemu/qemu_driver.c | 76 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 52 insertions(+), 24 deletions(-)
--
1.9.3
10 years, 1 month
[libvirt] [PATCH V3 0/3] Some improvements for video model
by Wang Rui
From: Zeng Junliang <zengjunliang(a)huawei.com>
http://www.redhat.com/archives/libvir-list/2014-July/msg00644.html
diff to v2:
- hide vram attribute silently instead of reporting an error.
- introduce three new capabilities for vga.vgamem_mb, vmvga.vgamem_mb and qxl.vgamem_mb.
- fix some error reported by building libvirt.
Zeng Junliang (3):
qemu: Hide vram attribute for some useless cases.
qemu: Introduce vgamem attribute for video model
qemu: Add secondary-vga support
docs/formatdomain.html.in | 46 +++---
docs/schemas/domaincommon.rng | 6 +
src/conf/domain_conf.c | 57 +++++++-
src/conf/domain_conf.h | 3 +
src/libvirt_private.syms | 1 +
src/qemu/qemu_capabilities.c | 17 +++
src/qemu/qemu_capabilities.h | 4 +
src/qemu/qemu_command.c | 162 +++++++++++++++------
src/qemu/qemu_domain.c | 12 ++
tests/qemucapabilitiesdata/caps_1.2.2-1.caps | 3 +
tests/qemucapabilitiesdata/caps_1.3.1-1.caps | 3 +
tests/qemucapabilitiesdata/caps_1.4.2-1.caps | 3 +
tests/qemucapabilitiesdata/caps_1.5.3-1.caps | 3 +
tests/qemucapabilitiesdata/caps_1.6.0-1.caps | 3 +
tests/qemucapabilitiesdata/caps_1.6.50-1.caps | 3 +
tests/qemuhelptest.c | 10 +-
...qemuhotplug-console-compat-2+console-virtio.xml | 2 +-
.../qemuxml2argv-console-compat-2.xml | 2 +-
.../qemuxml2argv-controller-order.xml | 2 +-
.../qemuxml2argv-graphics-listen-network.xml | 2 +-
.../qemuxml2argv-graphics-listen-network2.xml | 2 +-
.../qemuxml2argv-graphics-sdl-fullscreen.xml | 2 +-
.../qemuxml2argvdata/qemuxml2argv-graphics-sdl.xml | 2 +-
...emuxml2argv-graphics-spice-agent-file-xfer.args | 5 +-
...qemuxml2argv-graphics-spice-agent-file-xfer.xml | 4 +-
.../qemuxml2argv-graphics-spice-agentmouse.xml | 2 +-
.../qemuxml2argv-graphics-spice-compression.args | 4 +-
.../qemuxml2argv-graphics-spice-compression.xml | 4 +-
.../qemuxml2argv-graphics-spice-listen-network.xml | 4 +-
.../qemuxml2argv-graphics-spice-qxl-vga.args | 4 +-
.../qemuxml2argv-graphics-spice-qxl-vga.xml | 4 +-
.../qemuxml2argv-graphics-spice-sasl.args | 3 +-
.../qemuxml2argv-graphics-spice-sasl.xml | 2 +-
.../qemuxml2argv-graphics-spice-timeout.xml | 2 +-
.../qemuxml2argv-graphics-spice.args | 5 +-
.../qemuxml2argv-graphics-spice.xml | 4 +-
.../qemuxml2argv-graphics-vnc-policy.xml | 2 +-
.../qemuxml2argv-graphics-vnc-sasl.xml | 2 +-
.../qemuxml2argv-graphics-vnc-secondary-vga.args | 7 +
.../qemuxml2argv-graphics-vnc-secondary-vga.xml | 39 +++++
.../qemuxml2argv-graphics-vnc-socket.xml | 2 +-
.../qemuxml2argv-graphics-vnc-std-vga.args | 4 +
.../qemuxml2argv-graphics-vnc-std-vga.xml | 36 +++++
.../qemuxml2argv-graphics-vnc-tls.xml | 2 +-
.../qemuxml2argv-graphics-vnc-vmware-svga.args | 4 +
.../qemuxml2argv-graphics-vnc-vmware-svga.xml | 36 +++++
.../qemuxml2argv-graphics-vnc-websocket.xml | 2 +-
.../qemuxml2argvdata/qemuxml2argv-graphics-vnc.xml | 2 +-
.../qemuxml2argv-net-bandwidth.xml | 2 +-
.../qemuxml2argv-pci-autoadd-addr.xml | 2 +-
.../qemuxml2argv-pci-autoadd-idx.xml | 2 +-
tests/qemuxml2argvdata/qemuxml2argv-pci-bridge.xml | 2 +-
.../qemuxml2argv-pcihole64-q35.args | 3 +-
.../qemuxml2argv-pcihole64-q35.xml | 2 +-
.../qemuxml2argvdata/qemuxml2argv-pseries-disk.xml | 2 +-
tests/qemuxml2argvdata/qemuxml2argv-q35.args | 3 +-
tests/qemuxml2argvdata/qemuxml2argv-q35.xml | 2 +-
.../qemuxml2argv-serial-spiceport.args | 4 +-
.../qemuxml2argv-serial-spiceport.xml | 2 +-
.../qemuxml2argv-video-device-pciaddr-default.args | 9 +-
.../qemuxml2argv-video-device-pciaddr-default.xml | 6 +-
tests/qemuxml2argvtest.c | 26 +++-
.../qemuxml2xmlout-graphics-listen-network2.xml | 2 +-
.../qemuxml2xmlout-graphics-spice-timeout.xml | 2 +-
.../qemuxml2xmlout-pci-autoadd-addr.xml | 2 +-
.../qemuxml2xmlout-pci-autoadd-idx.xml | 2 +-
tests/qemuxml2xmloutdata/qemuxml2xmlout-q35.xml | 2 +-
tests/qemuxml2xmltest.c | 3 +
68 files changed, 494 insertions(+), 121 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-graphics-vnc-secondary-vga.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-graphics-vnc-secondary-vga.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-graphics-vnc-std-vga.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-graphics-vnc-std-vga.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-graphics-vnc-vmware-svga.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-graphics-vnc-vmware-svga.xml
--
1.7.12.4
10 years, 1 month
[libvirt] [PATCH] Fix leftover typo '&' -> '&&'
by Martin Kletzander
The actual origin of this so called typo are two commits. The first one
was commit 72f8a7f that came up with the following condition:
if ((i == 8) & (flags & VIR_QEMU_PROCESS_KILL_FORCE))
Fortunately this succeeded thanks to bool being (int)1 and
VIR_QEMU_PROCESS_KILL_FORCE having the value of 1 << 0. The check was
then moved and altered in 8fd38231179c394f07d8a26bcbf3a0faa5eeaf24 to
current state:
if ((i == 50) & force)
that will work again (both sides of '&' being booleans), but since this
was missed so many times, it may pose a problem in the future in case it
gets copy-pasted again.
Signed-off-by: Martin Kletzander <mkletzan(a)redhat.com>
---
src/util/virprocess.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/util/virprocess.c b/src/util/virprocess.c
index 486123a..6266cbe 100644
--- a/src/util/virprocess.c
+++ b/src/util/virprocess.c
@@ -354,7 +354,7 @@ virProcessKillPainfully(pid_t pid, bool force)
int signum;
if (i == 0) {
signum = SIGTERM; /* kindly suggest it should exit */
- } else if ((i == 50) & force) {
+ } else if ((i == 50) && force) {
VIR_DEBUG("Timed out waiting after SIGTERM to process %lld, "
"sending SIGKILL", (long long)pid);
/* No SIGKILL kill on Win32 ! Use SIGABRT instead which our
--
2.1.2
10 years, 1 month
Re: [libvirt] Implementation of new features for Hyper-V Libvirt driver
by Eric Blake
[the original message got eaten by the moderator queue because of size
violation]
On 12/31/1969 05:00 PM, wrote:
> Hi all,
>
> I've just submitted a set of 21 patchs for the implementation of new features in the hyperv libvirt driver.
> For your convenience, I have attached to this email a document giving more details about this delivery.
Rather than sending a 450k pdf describing the changes, just submit the
changes themselves, one email per change. 'git send-email origin
--cover-letter --annotate' can work miracles at giving you a chance to
write a 0/21 cover letter that summarizes the rest of the series, and
then 21 emails at one per patch. Doing it in this manner will make it
much more likely that your series gets a timely review.
--
Eric Blake eblake redhat com +1-919-301-3266
Libvirt virtualization library http://libvirt.org
10 years, 1 month
[libvirt] [Question] capabilities.pidfile is left behind while starting and stopping libvirtd repeatedly
by Wang Yufei
Hi, all
I started and stopped the libvirtd service repeatedly with high frequency (once per second), and found that the file capabilities.pidfile is left behind, as well as a qemu process. If I then restart libvirtd, qemu-kvm will fail to start as it's unable to flock capabilities.pidfile's fd.
Steps to reproduce the problem:
1. start libvirtd service per second with a shell script.
2. meanwhile, stop libvirtd service per second with another shell script.
3. then, a process qemu is left behind:
/usr/bin/qemu-kvm -S -no-user-config -nodefaults -nographic -M none -qmp unix:/var/lib/libvirt/qemu/capabilities.monitor.sock,server,nowait -pidfile /var/lib/libvirt/qemu/capabilities.pidfile -daemonize
4. file /var/lib/libvirt/qemu/capabilities.pidfile is left behind
5. start libvirtd again, the process qemu-kvm fails to start.
The cause of the problem:
This file is generated by qemu and deleted by libvirtd. If libvirtd gets killed before it removes the pidfile, the file is left behind.
Question:
Would it be fine if I kill qemu-kvm process and unlink capabilities.pidfile, just before virQEMUCapsInitQMP runs this qemu-kvm process?
--
Best Regards
Wang Yufei
10 years, 1 month
[libvirt] [PATCH v3 0/3] Add support for shared memory devices
by Martin Kletzander
v3:
- removed the dead code for server start attribute
- fixed default path in documentation
- removed unnecessary check for shmem size
v2 is here:
https://www.redhat.com/archives/libvir-list/2014-September/msg01519.html
v1 (Maxime's) is here:
https://www.redhat.com/archives/libvir-list/2014-August/msg01032.html
Martin Kletzander (2):
docs, conf, schema: add support for shmem device
qemu: Build command line for ivshmem device
Maxime Leroy (1):
qemu: add capability probing for ivshmem device
docs/formatdomain.html.in | 52 ++++++
docs/schemas/domaincommon.rng | 39 ++++
src/conf/domain_conf.c | 198 ++++++++++++++++++++-
src/conf/domain_conf.h | 24 +++
src/qemu/qemu_capabilities.c | 2 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_command.c | 118 +++++++++++-
src/qemu/qemu_command.h | 1 +
src/qemu/qemu_hotplug.c | 1 +
tests/qemucapabilitiesdata/caps_1.2.2-1.caps | 1 +
tests/qemucapabilitiesdata/caps_1.3.1-1.caps | 1 +
tests/qemucapabilitiesdata/caps_1.4.2-1.caps | 1 +
tests/qemucapabilitiesdata/caps_1.5.3-1.caps | 1 +
tests/qemucapabilitiesdata/caps_1.6.0-1.caps | 1 +
tests/qemucapabilitiesdata/caps_1.6.50-1.caps | 1 +
tests/qemucapabilitiesdata/caps_2.1.1-1.caps | 1 +
tests/qemuhelptest.c | 15 +-
.../qemuxml2argv-shmem-invalid-size.xml | 24 +++
.../qemuxml2argv-shmem-msi-only.xml | 24 +++
.../qemuxml2argv-shmem-small-size.xml | 24 +++
tests/qemuxml2argvdata/qemuxml2argv-shmem.args | 16 ++
tests/qemuxml2argvdata/qemuxml2argv-shmem.xml | 51 ++++++
tests/qemuxml2argvtest.c | 7 +
tests/qemuxml2xmltest.c | 1 +
24 files changed, 598 insertions(+), 7 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-shmem-invalid-size.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-shmem-msi-only.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-shmem-small-size.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-shmem.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-shmem.xml
--
2.1.1
10 years, 1 month