[libvirt] [PATCH] qemu: Do not reattach PCI device used by other domain when shutdown
by Osier Yang
When a domain fails to start, libvirt tries to reattach all the PCI
devices defined in the domain conf, regardless of whether the devices
are still used by another domain. This causes the devices to be deleted
from the list qemu_driver->activePciHostdevs, so the devices are thought
to be usable even though they are not, and subsequent
nodedev-{reattach,reset} commands will succeed.
How to reproduce:
1) Define two domains with same PCI device defined in the confs.
2) # virsh start domain1
3) # virsh start domain2
4) # virsh nodedev-reattach $pci_device
You will see the device is reattached to the host successfully.
pciDeviceReattach only checks whether the device is still used by
another domain by looking for it in the list driver->activePciHostdevs;
however, the device was already removed from that list when starting
the second domain (step 3) failed.
This patch fixes the bug by:
1) Refusing to start a domain or attach a device right in the
preparation phase (qemuPrepareHostdevPCIDevices) if the
device is already in the list driver->activePciHostdevs, which
means it is used by another domain.
2) Introducing a new field in struct _pciDevice (char *used_by),
which is set to the domain name during the preparation phase
(qemuPrepareHostdevPCIDevices). This lets us avoid deleting
the device from driver->activePciHostdevs when stopping a domain
process if the device is still used by another domain.
* src/pci.h (define two internal functions, pciDeviceSetUsedBy and
pciDeviceGetUsedBy)
* src/pci.c (new field "char *used_by" for struct _pciDevice,
implementations for the two new functions)
* src/libvirt_private.syms (Add the two new internal functions)
* src/qemu_hostdev.h (Modify the signatures of qemuPrepareHostdevPCIDevices
and qemuDomainReAttachHostdevDevices)
* src/qemu_hostdev.c (Prohibit preparation and don't delete the
device from activePciHostdevs list if it's still used by other domain)
* src/qemu_hotplug.c (Update the callers to match the changed
signatures)
---
src/libvirt_private.syms | 2 ++
src/qemu/qemu_hostdev.c | 31 ++++++++++++++++++++++++++++---
src/qemu/qemu_hostdev.h | 2 ++
src/qemu/qemu_hotplug.c | 4 ++--
src/util/pci.c | 22 ++++++++++++++++++++++
src/util/pci.h | 3 +++
6 files changed, 59 insertions(+), 5 deletions(-)
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 8235ea1..a5c5e6c 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -872,6 +872,7 @@ virNWFilterHashTableRemoveEntry;
pciDettachDevice;
pciDeviceFileIterate;
pciDeviceGetManaged;
+pciDeviceGetUsedBy;
pciDeviceIsAssignable;
pciDeviceIsVirtualFunction;
pciDeviceListAdd;
@@ -884,6 +885,7 @@ pciDeviceListSteal;
pciDeviceNetName;
pciDeviceReAttachInit;
pciDeviceSetManaged;
+pciDeviceSetUsedBy;
pciFreeDevice;
pciGetDevice;
pciGetPhysicalFunction;
diff --git a/src/qemu/qemu_hostdev.c b/src/qemu/qemu_hostdev.c
index 6f77717..ef9e3b7 100644
--- a/src/qemu/qemu_hostdev.c
+++ b/src/qemu/qemu_hostdev.c
@@ -101,6 +101,7 @@ cleanup:
int qemuPrepareHostdevPCIDevices(struct qemud_driver *driver,
+ const char *name,
virDomainHostdevDefPtr *hostdevs,
int nhostdevs)
{
@@ -126,7 +127,10 @@ int qemuPrepareHostdevPCIDevices(struct qemud_driver *driver,
for (i = 0; i < pciDeviceListCount(pcidevs); i++) {
pciDevice *dev = pciDeviceListGet(pcidevs, i);
if (!pciDeviceIsAssignable(dev, !driver->relaxedACS))
- goto reattachdevs;
+ goto cleanup;
+
+ if (pciDeviceListFind(driver->activePciHostdevs, dev))
+ goto cleanup;
if (pciDeviceGetManaged(dev) &&
pciDettachDevice(dev, driver->activePciHostdevs) < 0)
@@ -156,6 +160,14 @@ int qemuPrepareHostdevPCIDevices(struct qemud_driver *driver,
pciDeviceListSteal(pcidevs, dev);
}
+ /* Now set the used_by_domain of the device in driver->activePciHostdevs
+ * as domain name.
+ */
+ for (i = 0; i < pciDeviceListCount(driver->activePciHostdevs); i++) {
+ pciDevice * dev = pciDeviceListGet(driver->activePciHostdevs, i);
+ pciDeviceSetUsedBy(dev, name);
+ }
+
ret = 0;
goto cleanup;
@@ -183,7 +195,7 @@ static int
qemuPrepareHostPCIDevices(struct qemud_driver *driver,
virDomainDefPtr def)
{
- return qemuPrepareHostdevPCIDevices(driver, def->hostdevs, def->nhostdevs);
+ return qemuPrepareHostdevPCIDevices(driver, def->name, def->hostdevs, def->nhostdevs);
}
@@ -258,11 +270,13 @@ void qemuReattachPciDevice(pciDevice *dev, struct qemud_driver *driver)
void qemuDomainReAttachHostdevDevices(struct qemud_driver *driver,
+ const char *name,
virDomainHostdevDefPtr *hostdevs,
int nhostdevs)
{
pciDeviceList *pcidevs;
int i;
+ const char *used_by = NULL;
if (!(pcidevs = qemuGetPciHostDeviceList(hostdevs, nhostdevs))) {
virErrorPtr err = virGetLastError();
@@ -277,6 +291,17 @@ void qemuDomainReAttachHostdevDevices(struct qemud_driver *driver,
for (i = 0; i < pciDeviceListCount(pcidevs); i++) {
pciDevice *dev = pciDeviceListGet(pcidevs, i);
+ pciDevice *activeDev = NULL;
+
+ /* Never delete the dev from list driver->activePciHostdevs
+ * if it's used by other domain.
+ */
+ activeDev = pciDeviceListFind(driver->activePciHostdevs, dev);
+ if (activeDev &&
+ (used_by = pciDeviceGetUsedBy(activeDev)) &&
+ STRNEQ(used_by, name))
+ continue;
+
pciDeviceListDel(driver->activePciHostdevs, dev);
}
@@ -305,5 +330,5 @@ void qemuDomainReAttachHostDevices(struct qemud_driver *driver,
if (!def->nhostdevs)
return;
- qemuDomainReAttachHostdevDevices(driver, def->hostdevs, def->nhostdevs);
+ qemuDomainReAttachHostdevDevices(driver, def->name, def->hostdevs, def->nhostdevs);
}
diff --git a/src/qemu/qemu_hostdev.h b/src/qemu/qemu_hostdev.h
index 1f3d1bc..07d7de2 100644
--- a/src/qemu/qemu_hostdev.h
+++ b/src/qemu/qemu_hostdev.h
@@ -30,12 +30,14 @@
int qemuUpdateActivePciHostdevs(struct qemud_driver *driver,
virDomainDefPtr def);
int qemuPrepareHostdevPCIDevices(struct qemud_driver *driver,
+ const char *name,
virDomainHostdevDefPtr *hostdevs,
int nhostdevs);
int qemuPrepareHostDevices(struct qemud_driver *driver,
virDomainDefPtr def);
void qemuReattachPciDevice(pciDevice *dev, struct qemud_driver *driver);
void qemuDomainReAttachHostdevDevices(struct qemud_driver *driver,
+ const char *name,
virDomainHostdevDefPtr *hostdevs,
int nhostdevs);
void qemuDomainReAttachHostDevices(struct qemud_driver *driver,
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 6cfe392..dc920e7 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -859,7 +859,7 @@ int qemuDomainAttachHostPciDevice(struct qemud_driver *driver,
return -1;
}
- if (qemuPrepareHostdevPCIDevices(driver, &hostdev, 1) < 0)
+ if (qemuPrepareHostdevPCIDevices(driver, vm->def->name, &hostdev, 1) < 0)
return -1;
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE)) {
@@ -925,7 +925,7 @@ error:
hostdev->info.addr.pci.slot) < 0)
VIR_WARN("Unable to release PCI address on host device");
- qemuDomainReAttachHostdevDevices(driver, &hostdev, 1);
+ qemuDomainReAttachHostdevDevices(driver, vm->def->name, &hostdev, 1);
VIR_FREE(devstr);
VIR_FREE(configfd_name);
diff --git a/src/util/pci.c b/src/util/pci.c
index 8d8e157..38548c7 100644
--- a/src/util/pci.c
+++ b/src/util/pci.c
@@ -62,6 +62,7 @@ struct _pciDevice {
char name[PCI_ADDR_LEN]; /* domain:bus:slot.function */
char id[PCI_ID_LEN]; /* product vendor */
char *path;
+ char *used_by; /* The domain which uses the device */
int fd;
unsigned initted;
@@ -1312,6 +1313,7 @@ pciGetDevice(unsigned domain,
dev->bus = bus;
dev->slot = slot;
dev->function = function;
+ dev->used_by = NULL;
if (snprintf(dev->name, sizeof(dev->name), "%.4x:%.2x:%.2x.%.1x",
dev->domain, dev->bus, dev->slot,
@@ -1374,6 +1376,7 @@ pciFreeDevice(pciDevice *dev)
VIR_DEBUG("%s %s: freeing", dev->id, dev->name);
pciCloseConfig(dev);
VIR_FREE(dev->path);
+ VIR_FREE(dev->used_by);
VIR_FREE(dev);
}
@@ -1387,6 +1390,25 @@ unsigned pciDeviceGetManaged(pciDevice *dev)
return dev->managed;
}
+int
+pciDeviceSetUsedBy(pciDevice *dev, const char *name)
+{
+ dev->used_by = strdup(name);
+
+ if (!dev->used_by) {
+ virReportOOMError();
+ return -1;
+ }
+
+ return 0;
+}
+
+const char *
+pciDeviceGetUsedBy(pciDevice *dev)
+{
+ return dev->used_by;
+}
+
void pciDeviceReAttachInit(pciDevice *pci)
{
pci->unbind_from_stub = 1;
diff --git a/src/util/pci.h b/src/util/pci.h
index a1600fe..c9d8227 100644
--- a/src/util/pci.h
+++ b/src/util/pci.h
@@ -47,6 +47,9 @@ int pciResetDevice (pciDevice *dev,
void pciDeviceSetManaged(pciDevice *dev,
unsigned managed);
unsigned pciDeviceGetManaged(pciDevice *dev);
+int pciDeviceSetUsedBy(pciDevice *dev,
+ const char *used_by);
+const char *pciDeviceGetUsedBy(pciDevice *dev);
void pciDeviceReAttachInit(pciDevice *dev);
pciDeviceList *pciDeviceListNew (void);
--
1.7.6
[libvirt] [PATCH 0/5 v2] Improve Ceph Qemu+RBD support
by Sage Weil
The current support for qemu and Ceph RBD (rados block device) has two
main deficiencies: authentication doesn't work, and it relies on
environment variables (which don't work with the latest upstream). This
patch set addresses both those problems.
The first two patches update the xml schemas and conf to add a Ceph
secret type and to specify authentication information along with the rbd
disk.
The next two patches make some libvirt changes. We pass virConnectPtr
down into the Domain{Attach,Detach} methods (needed to access secrets
while building the qemu command), and add a helper that will escape
arbitrary characters.
The final patch replaces the current RBD qemu code and uses the new conf
info to do authentication properly. (We still need to make a change
there to avoid having the authentication key show up on qemu command
line; I'll clean that up shortly.)
Comments on this approach?
Thanks!
sage
Changes from v1:
update docs/schemas/{domain,secret}.rng
Sage Weil (5):
secret: add Ceph secret type
storage: add authId, authDomain to virDomainDiskDef
qemu: pass virConnectPtr into Domain{Attach,Detach}*
buf: implement generic virBufferEscape
qemu/rbd: improve rbd device specification
docs/schemas/domain.rng | 6 +
docs/schemas/secret.rng | 17 ++
include/libvirt/libvirt.h.in | 3 +
src/conf/domain_conf.c | 43 +++-
src/conf/domain_conf.h | 2 +
src/conf/secret_conf.c | 45 ++++-
src/conf/secret_conf.h | 1 +
src/libvirt_private.syms | 1 +
src/qemu/qemu_command.c | 273 +++++++++++---------
src/qemu/qemu_command.h | 3 +-
src/qemu/qemu_driver.c | 17 +-
src/qemu/qemu_hotplug.c | 15 +-
src/qemu/qemu_hotplug.h | 9 +-
src/secret/secret_driver.c | 8 +
src/util/buf.c | 33 ++-
src/util/buf.h | 1 +
.../qemuxml2argv-disk-drive-network-rbd.args | 6 +-
.../qemuxml2argv-disk-drive-network-rbd.xml | 1 +
18 files changed, 328 insertions(+), 156 deletions(-)
--
1.7.4.1
From 498cd06b76bbb4415a2f81f9d169f267ff99329c Mon Sep 17 00:00:00 2001
From: Sage Weil <sage(a)newdream.net>
Date: Thu, 15 Sep 2011 13:47:40 -0700
Subject: [PATCH 0/5] Improve Ceph Qemu+RBD support
The current support for qemu and Ceph RBD (rados block device) has two
main deficiencies: authentication doesn't work, and it relies on
environment variables (which don't work with the latest upstream).
This patch set addresses both those problems, while trying to integrate as
cleanly as possible with the rest of libvirt.
The first few patches make some changes to libvirt itself: adding a CEPH
secret type (for Ceph/RBD authentication), adding authentication fields
to the XML schema, passing the virConnectPtr into the
Domain{Attach,Detach} methods (needed to access secrets while building
the qemu command), a helper that will escape arbitrary characters, and
finally a patch that replaces the current RBD qemu code.
Comments on this approach?
Thanks!
sage
Sage Weil (5):
secret: add Ceph secret type
storage: add authId, authDomain to virDomainDiskDef
qemu: pass virConnectPtr into Domain{Attach,Detach}*
buf: implement generic virBufferEscape
qemu/rbd: improve rbd device specification
include/libvirt/libvirt.h.in | 3 +
src/conf/domain_conf.c | 43 +++-
src/conf/domain_conf.h | 2 +
src/conf/secret_conf.c | 45 ++++-
src/conf/secret_conf.h | 1 +
src/libvirt_private.syms | 1 +
src/qemu/qemu_command.c | 273 +++++++++++---------
src/qemu/qemu_command.h | 3 +-
src/qemu/qemu_driver.c | 17 +-
src/qemu/qemu_hotplug.c | 15 +-
src/qemu/qemu_hotplug.h | 9 +-
src/secret/secret_driver.c | 8 +
src/util/buf.c | 33 ++-
src/util/buf.h | 1 +
.../qemuxml2argv-disk-drive-network-rbd.args | 6 +-
.../qemuxml2argv-disk-drive-network-rbd.xml | 1 +
16 files changed, 305 insertions(+), 156 deletions(-)
--
1.7.4.1
[libvirt] [PATCHv2 0/7] snapshot: listing children
by Eric Blake
Cleaned up the rebase goofs present throughout my v1:
https://www.redhat.com/archives/libvir-list/2011-September/msg01270.html
Eric Blake (7):
snapshot: new virDomainSnapshotListChildrenNames API
snapshot: virsh snapshot-list and children
snapshot: virsh fallback for snapshot-list --tree --from
snapshot: virsh fallback for snapshot-list --from children
snapshot: virsh fallback for snapshot-list --descendants --from
snapshot: remote protocol for snapshot children
snapshot: implement snapshot children listing in qemu
include/libvirt/libvirt.h.in | 27 +++++--
python/generator.py | 4 +
python/libvirt-override-api.xml | 12 ++-
python/libvirt-override.c | 45 +++++++++
src/conf/domain_conf.c | 51 +++++++++++
src/conf/domain_conf.h | 7 ++
src/driver.h | 12 +++
src/libvirt.c | 111 +++++++++++++++++++++++
src/libvirt_private.syms | 2 +
src/libvirt_public.syms | 2 +
src/qemu/qemu_driver.c | 87 ++++++++++++++++++
src/remote/remote_driver.c | 2 +
src/remote/remote_protocol.x | 25 +++++-
src/remote_protocol-structs | 20 ++++
tools/virsh.c | 190 ++++++++++++++++++++++++++++++++------
tools/virsh.pod | 9 ++-
16 files changed, 564 insertions(+), 42 deletions(-)
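For illustration, client code would use the new listing API roughly like
this (just a sketch, not part of the series; buffer size is arbitrary and
error handling is omitted):
#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>
/* print the direct children of an existing snapshot object; passing
 * VIR_DOMAIN_SNAPSHOT_LIST_DESCENDANTS as flags instead of 0 would list
 * the whole subtree below 'snap' */
static void print_children(virDomainSnapshotPtr snap)
{
    char *names[16];
    int i, n;
    n = virDomainSnapshotListChildrenNames(snap, names, 16, 0);
    for (i = 0; i < n; i++) {
        printf("child: %s\n", names[i]);
        free(names[i]);
    }
}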
--
1.7.4.4
[libvirt] [RFC] Adding new filesystem 'proxy' to 9p
by M. Mohan Kumar
The pass-through security model in the QEMU 9p server needs root privilege for a few
file operations (like chown, or chmod to any mode/uid:gid). There are two issues
with the pass-through security model:
1) TOCTTOU vulnerability: following symbolic links in the server could
provide access to files beyond the 9p export path.
2) When libvirt is configured to run qemu as a non-root user (for example, if
qemu is configured to run as the normal user 'qemu'), file operations in the
pass-through security model fail because they need root privileges.
To overcome the above issues, the following approach is suggested: a new filesystem
type 'proxy' is introduced. Proxy FS uses a chroot + socket combination to close
the vulnerability associated with following symbolic links. The intention of
adding a new filesystem type is to allow qemu to run in non-root mode while
doing the privileged operations over socket IO.
A new binary (known as the proxy helper) will be provided as part of qemu. The proxy
helper will chroot into the 9p export path and create a socket pair or a named
socket, depending on its command line parameters. Qemu and the proxy helper will
communicate using this socket.
We need the following changes in the libvirt code to accommodate the new 'proxy'
filesystem type:
If the qemu 9p server is configured to use the 'proxy' FS, libvirt will:
* Create a socket pair
* Invoke the proxy_helper binary with root privilege, passing one socket fd
from the pair on its command line
* Invoke qemu with the other socket fd from the pair as a parameter to the
qemu virtfs option, after dropping to the configured user privilege.
i.e., libvirt will invoke proxy_helper as:
proxy_helper -i <socket_fd_from_socket_pair> -p <9p-path-to-export>
and qemu will be invoked with following virtfs parameter:
-virtfs proxy,id=<id>,sock_fd=<socket_fd_from_socket_pair>
,path=/tmp/,security_model=prox,mount_tag=v_pass
People who want to use proxy_helper without libvirt can use the following
interface:
$ proxy_helper -s </socket/path> -p <9p-path-to-export>
With following qemu fsdev parameter:
-virtfs proxy,id=<id>,socket=</socket/path>,path=/tmp/,
security_model=prox,mount_tag=v_pass
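Roughly, the libvirt side would boil down to something like the following
sketch (just for illustration; the helper option names are the ones from the
examples above and may still change):
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
/* sketch of the socketpair + exec flow described above */
static int start_proxy_helper(const char *export_path, int *qemu_fd)
{
    int fds[2];
    char fdstr[16];
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
        return -1;
    if (fork() == 0) {
        /* child: still running as root; the helper chroots into the export */
        snprintf(fdstr, sizeof(fdstr), "%d", fds[1]);
        execlp("proxy_helper", "proxy_helper", "-i", fdstr, "-p", export_path,
               (char *) NULL);
        _exit(1);
    }
    close(fds[1]);
    *qemu_fd = fds[0];  /* passed to qemu later as ...,sock_fd=<fd>, after
                         * dropping to the configured user */
    return 0;
}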
--
Regards,
M. Mohan Kumar
[libvirt] Qemu/KVM is 3x slower under libvirt
by Reeted
I repost this, this time by also including the libvirt mailing list.
Info on my libvirt: it's the version in Ubuntu 11.04 Natty, which is
0.8.8-1ubuntu6.5. I didn't recompile this one, while the kernel and
qemu-kvm are vanilla and compiled by hand as described below.
My original message follows:
This is really strange.
I just installed a new host with kernel 3.0.3 and Qemu-KVM 0.14.1
compiled by me.
I have created the first VM.
This is on LVM, virtio etc... if I run it directly from the bash console, it
boots in 8 seconds (it's a bare Ubuntu with no graphics), while if I
boot it under virsh (libvirt) it boots in 20-22 seconds. This is the
time from after Grub to the login prompt, or from after Grub to the
ssh server being up.
I was almost able to replicate the whole libvirt command line on the
bash console, and it still goes almost 3x faster when launched from bash
than with virsh start vmname. The part I wasn't able to replicate is the
-netdev part because I still haven't understood the semantics of it.
This is my bash commandline:
/opt/qemu-kvm-0.14.1/bin/qemu-system-x86_64 -M pc-0.14 -enable-kvm -m
2002 -smp 2,sockets=2,cores=1,threads=1 -name vmname1-1 -uuid
ee75e28a-3bf3-78d9-3cba-65aa63973380 -nodefconfig -nodefaults -chardev
socket,id=charmonitor,path=/var/lib/libvirt/qemu/vmname1-1.monitor,server,nowait
-mon chardev=charmonitor,id=monitor,mode=readline -rtc base=utc -boot
order=dc,menu=on -drive
file=/dev/mapper/vgPtpVM-lvVM_Vmname1_d1,if=none,id=drive-virtio-disk0,boot=on,format=raw,cache=none,aio=native
-device
virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0
-drive
if=none,media=cdrom,id=drive-ide0-1-0,readonly=on,format=raw,cache=none,aio=native
-device ide-drive,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0 -net
nic,model=virtio -net tap,ifname=tap0,script=no,downscript=no -usb -vnc
127.0.0.1:0 -vga cirrus -device
virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5
This was taken from libvirt's command line. The only modifications I
made to the original libvirt command line (seen with ps aux) were:
- Removed -S
- Network was: -netdev tap,fd=17,id=hostnet0,vhost=on,vhostfd=18 -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=52:54:00:05:36:60,bus=pci.0,addr=0x3
Has been simplified to: -net nic,model=virtio -net
tap,ifname=tap0,script=no,downscript=no
and manual bridging of the tap0 interface.
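(If I understand the fd-passing form correctly, it only means that libvirt
opens the tap device itself and hands the file descriptor to qemu, so the
closest standalone equivalent should be something like:
-netdev tap,id=hostnet0,ifname=tap0,script=no,downscript=no,vhost=on -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=52:54:00:05:36:60,bus=pci.0,addr=0x3
but I haven't verified that.)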
At first I thought this could be the fault of VNC: I compiled qemu-kvm
with no separate vnc thread. I thought that libvirt might be connected
to the vnc server at all times and that this could slow down the whole VM.
But then I also tried connecting with vncviewer to the KVM machine
launched directly from bash, and its speed didn't change. So no,
it doesn't seem to be that.
BTW: is the slowdown of the VM on "no separate vnc thread" only in
effect when somebody is actually connected to VNC, or always?
Also, note that the time difference is not visible in dmesg once the
machine has booted. So it's not a slowdown in detecting devices. Devices
are always detected within the first 3 seconds according to dmesg, and at
3.6 seconds the first ext4 mount begins. It really seems to be the OS
boot that is slow... it looks like a hard disk performance problem.
Thank you
R.
[libvirt] [libvirt-glib] API to deal with storage pool(s)
by Zeeshan Ali (Khattak)
From: "Zeeshan Ali (Khattak)" <zeeshanak(a)gnome.org>
Add API to fetch, list, retrieve & find storage pool(s) on a connection.
---
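Not part of the patch -- just a rough usage sketch of the new calls,
assuming an already-open connection (error handling trimmed):
/* fetch the pools once, then walk the cached list */
static void dump_storage_pools(GVirConnection *conn)
{
    GError *err = NULL;
    GList *pools, *l;

    if (!gvir_connection_fetch_storage_pools(conn, NULL, &err)) {
        g_warning("failed to fetch storage pools: %s", err->message);
        g_error_free(err);
        return;
    }

    pools = gvir_connection_get_storage_pools(conn);
    for (l = pools; l != NULL; l = l->next) {
        GVirStoragePool *pool = GVIR_STORAGE_POOL(l->data);
        g_print("pool: %s\n", gvir_storage_pool_get_name(pool));
    }
    g_list_free_full(pools, g_object_unref);
}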
libvirt-gobject/libvirt-gobject-connection.c | 279 ++++++++++++++++++++++++++
libvirt-gobject/libvirt-gobject-connection.h | 12 +-
libvirt-gobject/libvirt-gobject.sym | 6 +
3 files changed, 296 insertions(+), 1 deletions(-)
diff --git a/libvirt-gobject/libvirt-gobject-connection.c b/libvirt-gobject/libvirt-gobject-connection.c
index 69c6956..c512e79 100644
--- a/libvirt-gobject/libvirt-gobject-connection.c
+++ b/libvirt-gobject/libvirt-gobject-connection.c
@@ -43,6 +43,7 @@ struct _GVirConnectionPrivate
virConnectPtr conn;
GHashTable *domains;
+ GHashTable *pools;
};
G_DEFINE_TYPE(GVirConnection, gvir_connection, G_TYPE_OBJECT);
@@ -357,6 +358,11 @@ void gvir_connection_close(GVirConnection *conn)
priv->domains = NULL;
}
+ if (priv->pools) {
+ g_hash_table_unref(priv->pools);
+ priv->pools = NULL;
+ }
+
if (priv->conn) {
virConnectClose(priv->conn);
priv->conn = NULL;
@@ -503,6 +509,148 @@ cleanup:
return ret;
}
+/**
+ * gvir_connection_fetch_storage_pools:
+ * @conn: the connection
+ * @cancellable: (allow-none)(transfer none): cancellation object
+ */
+gboolean gvir_connection_fetch_storage_pools(GVirConnection *conn,
+ GCancellable *cancellable,
+ GError **err)
+{
+ GVirConnectionPrivate *priv = conn->priv;
+ GHashTable *pools;
+ gchar **inactive = NULL;
+ gint ninactive = 0;
+ gchar **active = NULL;
+ gint nactive = 0;
+ gboolean ret = FALSE;
+ gint i;
+ virConnectPtr vconn = NULL;
+
+ g_mutex_lock(priv->lock);
+ if (!priv->conn) {
+ *err = gvir_error_new(GVIR_CONNECTION_ERROR,
+ 0,
+ "Connection is not open");
+ g_mutex_unlock(priv->lock);
+ goto cleanup;
+ }
+ vconn = priv->conn;
+ /* Stop another thread closing the connection just at the minute */
+ virConnectRef(vconn);
+ g_mutex_unlock(priv->lock);
+
+ if (g_cancellable_set_error_if_cancelled(cancellable, err))
+ goto cleanup;
+
+ if ((nactive = virConnectNumOfStoragePools(vconn)) < 0) {
+ *err = gvir_error_new(GVIR_CONNECTION_ERROR,
+ 0,
+ "Unable to count pools");
+ goto cleanup;
+ }
+ if (nactive) {
+ if (g_cancellable_set_error_if_cancelled(cancellable, err))
+ goto cleanup;
+
+ active = g_new(gchar *, nactive);
+ if ((nactive = virConnectListStoragePools(vconn,
+ active,
+ nactive)) < 0) {
+ *err = gvir_error_new(GVIR_CONNECTION_ERROR,
+ 0,
+ "Unable to list pools");
+ goto cleanup;
+ }
+ }
+
+ if (g_cancellable_set_error_if_cancelled(cancellable, err))
+ goto cleanup;
+
+ if ((ninactive = virConnectNumOfDefinedStoragePools(vconn)) < 0) {
+ *err = gvir_error_new(GVIR_CONNECTION_ERROR,
+ 0,
+ "Unable to count pools");
+ goto cleanup;
+ }
+
+ if (ninactive) {
+ if (g_cancellable_set_error_if_cancelled(cancellable, err))
+ goto cleanup;
+
+ inactive = g_new(gchar *, ninactive);
+ if ((ninactive = virConnectListDefinedStoragePools(vconn,
+ inactive,
+ ninactive)) < 0) {
+ *err = gvir_error_new(GVIR_CONNECTION_ERROR,
+ 0,
+ "Unable to list pools %d", ninactive);
+ goto cleanup;
+ }
+ }
+
+ pools = g_hash_table_new_full(g_str_hash,
+ g_str_equal,
+ g_free,
+ g_object_unref);
+
+ for (i = 0 ; i < nactive ; i++) {
+ if (g_cancellable_set_error_if_cancelled(cancellable, err))
+ goto cleanup;
+
+ virStoragePoolPtr vpool;
+ GVirStoragePool *pool;
+
+ vpool = virStoragePoolLookupByName(vconn, active[i]);
+ if (!vpool)
+ continue;
+
+ pool = GVIR_STORAGE_POOL(g_object_new(GVIR_TYPE_STORAGE_POOL,
+ "handle", vpool,
+ NULL));
+
+ g_hash_table_insert(pools,
+ g_strdup(gvir_storage_pool_get_uuid(pool)),
+ pool);
+ }
+
+ for (i = 0 ; i < ninactive ; i++) {
+ if (g_cancellable_set_error_if_cancelled(cancellable, err))
+ goto cleanup;
+
+ virStoragePoolPtr vpool;
+ GVirStoragePool *pool;
+
+ vpool = virStoragePoolLookupByName(vconn, inactive[i]);
+ if (!vpool)
+ continue;
+
+ pool = GVIR_STORAGE_POOL(g_object_new(GVIR_TYPE_STORAGE_POOL,
+ "handle", vpool,
+ NULL));
+
+ g_hash_table_insert(pools,
+ g_strdup(gvir_storage_pool_get_uuid(pool)),
+ pool);
+ }
+
+ g_mutex_lock(priv->lock);
+ if (priv->pools)
+ g_hash_table_unref(priv->pools);
+ priv->pools = pools;
+ virConnectClose(vconn);
+ g_mutex_unlock(priv->lock);
+
+ ret = TRUE;
+
+cleanup:
+ g_free(active);
+ for (i = 0 ; i < ninactive ; i++)
+ g_free(inactive[i]);
+ g_free(inactive);
+ return ret;
+}
static void
gvir_connection_fetch_domains_helper(GSimpleAsyncResult *res,
@@ -566,6 +714,67 @@ gboolean gvir_connection_fetch_domains_finish(GVirConnection *conn,
return TRUE;
}
+static void
+gvir_connection_fetch_pools_helper(GSimpleAsyncResult *res,
+ GObject *object,
+ GCancellable *cancellable)
+{
+ GVirConnection *conn = GVIR_CONNECTION(object);
+ GError *err = NULL;
+
+ if (!gvir_connection_fetch_storage_pools(conn, cancellable, &err)) {
+ g_simple_async_result_set_from_error(res, err);
+ g_error_free(err);
+ }
+}
+
+/**
+ * gvir_connection_fetch_storage_pools_async:
+ * @conn: the connection
+ * @cancellable: (allow-none)(transfer none): cancellation object
+ * @callback: (transfer none): completion callback
+ * @opaque: (transfer none)(allow-none): opaque data for callback
+ */
+void gvir_connection_fetch_storage_pools_async(GVirConnection *conn,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer opaque)
+{
+ GSimpleAsyncResult *res;
+
+ res = g_simple_async_result_new(G_OBJECT(conn),
+ callback,
+ opaque,
+ gvir_connection_fetch_storage_pools);
+ g_simple_async_result_run_in_thread(res,
+ gvir_connection_fetch_pools_helper,
+ G_PRIORITY_DEFAULT,
+ cancellable);
+ g_object_unref(res);
+}
+
+/**
+ * gvir_connection_fetch_storage_pools_finish:
+ * @conn: the connection
+ * @result: (transfer none): async method result
+ */
+gboolean gvir_connection_fetch_storage_pools_finish(GVirConnection *conn,
+ GAsyncResult *result,
+ GError **err)
+{
+ g_return_val_if_fail(GVIR_IS_CONNECTION(conn), FALSE);
+ g_return_val_if_fail(G_IS_ASYNC_RESULT(result), FALSE);
+
+ if (G_IS_SIMPLE_ASYNC_RESULT(result)) {
+ GSimpleAsyncResult *simple = G_SIMPLE_ASYNC_RESULT(result);
+ g_warn_if_fail (g_simple_async_result_get_source_tag(simple) ==
+ gvir_connection_fetch_storage_pools);
+ if (g_simple_async_result_propagate_error(simple, err))
+ return FALSE;
+ }
+
+ return TRUE;
+}
const gchar *gvir_connection_get_uri(GVirConnection *conn)
{
@@ -595,6 +804,25 @@ GList *gvir_connection_get_domains(GVirConnection *conn)
}
/**
+ * gvir_connection_get_storage_pools:
+ *
+ * Return value: (element-type LibvirtGObject.StoragePool) (transfer full): List
+ * of #GVirStoragePool
+ */
+GList *gvir_connection_get_storage_pools(GVirConnection *conn)
+{
+ GVirConnectionPrivate *priv = conn->priv;
+ GList *pools;
+
+ g_mutex_lock(priv->lock);
+ pools = g_hash_table_get_values(priv->pools);
+ g_list_foreach(pools, gvir_domain_ref, NULL);
+ g_mutex_unlock(priv->lock);
+
+ return pools;
+}
+
+/**
* gvir_connection_get_domain:
* @uuid: uuid string of the requested domain
*
@@ -613,6 +841,26 @@ GVirDomain *gvir_connection_get_domain(GVirConnection *conn,
return dom;
}
+/**
+ * gvir_connection_get_storage_pool:
+ * @uuid: uuid string of the requested storage pool
+ *
+ * Return value: (transfer full): the #GVirStoragePool, or NULL
+ */
+GVirStoragePool *gvir_connection_get_storage_pool(GVirConnection *conn,
+ const gchar *uuid)
+{
+ GVirConnectionPrivate *priv = conn->priv;
+ GVirStoragePool *pool;
+
+ g_mutex_lock(priv->lock);
+ pool = g_hash_table_lookup(priv->pools, uuid);
+ if (pool)
+ g_object_ref(pool);
+ g_mutex_unlock(priv->lock);
+
+ return pool;
+}
/**
* gvir_connection_find_domain_by_id:
@@ -677,6 +925,37 @@ GVirDomain *gvir_connection_find_domain_by_name(GVirConnection *conn,
return NULL;
}
+/**
+ * gvir_connection_find_storage_pool_by_name:
+ * @name: name of the requested storage pool
+ *
+ * Return value: (transfer full): the #GVirStoragePool, or NULL
+ */
+GVirStoragePool *gvir_connection_find_storage_pool_by_name(GVirConnection *conn,
+ const gchar *name)
+{
+ GVirConnectionPrivate *priv = conn->priv;
+ GHashTableIter iter;
+ gpointer key, value;
+
+ g_mutex_lock(priv->lock);
+ g_hash_table_iter_init(&iter, priv->pools);
+
+ while (g_hash_table_iter_next(&iter, &key, &value)) {
+ GVirStoragePool *pool = value;
+ const gchar *thisname = gvir_storage_pool_get_name(pool);
+
+ if (strcmp(thisname, name) == 0) {
+ g_object_ref(pool);
+ g_mutex_unlock(priv->lock);
+ return pool;
+ }
+ }
+ g_mutex_unlock(priv->lock);
+
+ return NULL;
+}
+
static gpointer
gvir_connection_handle_copy(gpointer src)
{
diff --git a/libvirt-gobject/libvirt-gobject-connection.h b/libvirt-gobject/libvirt-gobject-connection.h
index c453bed..d05f792 100644
--- a/libvirt-gobject/libvirt-gobject-connection.h
+++ b/libvirt-gobject/libvirt-gobject-connection.h
@@ -141,14 +141,24 @@ GVirNodeDevice *gvir_connection_get_node_device(GVirConnection *conn,
GList *gvir_connection_get_secrets(GVirConnection *conn);
GVirSecret *gvir_connection_get_secret(GVirConnection *conn,
const gchar *uuid);
+#endif
+gboolean gvir_connection_fetch_storage_pools(GVirConnection *conn,
+ GCancellable *cancellable,
+ GError **err);
+void gvir_connection_fetch_storage_pools_async(GVirConnection *conn,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer opaque);
+gboolean gvir_connection_fetch_storage_pools_finish(GVirConnection *conn,
+ GAsyncResult *result,
+ GError **err);
GList *gvir_connection_get_storage_pools(GVirConnection *conn);
GVirStoragePool *gvir_connection_get_storage_pool(GVirConnection *conn,
const gchar *uuid);
GVirStoragePool *gvir_connection_find_storage_pool_by_name(GVirConnection *conn,
const gchar *name);
-#endif
GVirStream *gvir_connection_get_stream(GVirConnection *conn,
gint flags);
diff --git a/libvirt-gobject/libvirt-gobject.sym b/libvirt-gobject/libvirt-gobject.sym
index eae40a2..ff2f4cf 100644
--- a/libvirt-gobject/libvirt-gobject.sym
+++ b/libvirt-gobject/libvirt-gobject.sym
@@ -14,10 +14,16 @@ LIBVIRT_GOBJECT_0.0.1 {
gvir_connection_get_stream;
gvir_connection_fetch_domains;
+ gvir_connection_fetch_storage_pools;
+ gvir_connection_fetch_storage_pools_async;
+ gvir_connection_fetch_storage_pools_finish;
gvir_connection_get_domains;
+ gvir_connection_get_storage_pools;
gvir_connection_get_domain;
+ gvir_connection_get_storage_pool;
gvir_connection_find_domain_by_id;
gvir_connection_find_domain_by_name;
+ gvir_connection_find_storage_pool_by_name;
gvir_connection_create_domain;
gvir_domain_get_type;
--
1.7.6.2
[libvirt] [PATCH v2 00/12] Implement keepalive protocol for libvirt RPC
by Jiri Denemark
This patchset can also be found at
https://gitorious.org/~jirka/libvirt/jirka-staging/commits/keepalive
This allows us to detect broken connections between server and client, or a
dead daemon/client, without waiting for the TCP timeout. By default a connection
is considered broken after about 30 seconds with no messages received from the
remote party. After that period, the connection is automatically closed.
The main reason for implementing this is that peer-to-peer migration can now be
canceled when the connection between source and target breaks, although this will
really work only after qemu fixes the migrate_cancel command so that it doesn't
block when outgoing TCP buffers are full.
Version 2 adds the virConnectIsAlive API and uses it to detect that a connection
was closed as a result of a keepalive timeout.
The only patch that changed in v2 is "Add keepalive support into
domain-events examples". All other patches are either new or unchanged
from v1.
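To illustrate, a client would drive this roughly as follows (just a sketch
using virConnectSetKeepAlive and virConnectIsAlive; interval/count values
are arbitrary and error handling is trimmed):
#include <stdio.h>
#include <libvirt/libvirt.h>
int main(void)
{
    virConnectPtr conn;
    if (virEventRegisterDefaultImpl() < 0)      /* keepalive needs a client event loop */
        return 1;
    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;
    if (virConnectSetKeepAlive(conn, 5, 3) < 0) /* probe every 5s, give up after 3 misses */
        fprintf(stderr, "failed to enable keepalive\n");
    while (virConnectIsAlive(conn) == 1) {      /* stops once keepalive declares it dead */
        if (virEventRunDefaultImpl() < 0)
            break;
    }
    fprintf(stderr, "connection is no longer alive\n");
    virConnectClose(conn);
    return 0;
}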
Jiri Denemark (12):
Define keepalive protocol
Implement common keepalive handling
Introduce two public APIs for keepalive protocol
Implement keepalive protocol in libvirt daemon
Add support for non-blocking calls in client RPC
Add support for async close of client RPC socket
Implement keepalive protocol in remote driver
Introduce virConnectIsAlive API
Implement virConnectIsAlive in all drivers
Add keepalive support into domain-events examples
qemu: Add support for keepalive messages during p2p migration
qemu: Cancel p2p migration when connection breaks
.gitignore | 1 +
daemon/libvirtd.aug | 4 +
daemon/libvirtd.c | 11 +
daemon/libvirtd.conf | 15 +
daemon/remote.c | 38 ++
examples/domain-events/events-c/event-test.c | 13 +-
examples/domain-events/events-python/event-test.py | 5 +-
include/libvirt/libvirt.h.in | 6 +
po/POTFILES.in | 1 +
src/Makefile.am | 13 +-
src/driver.h | 12 +
src/esx/esx_driver.c | 18 +
src/hyperv/hyperv_driver.c | 18 +
src/libvirt.c | 143 ++++++
src/libvirt_internal.h | 10 +-
src/libvirt_public.syms | 7 +
src/libxl/libxl_driver.c | 8 +
src/lxc/lxc_driver.c | 7 +
src/openvz/openvz_driver.c | 7 +
src/phyp/phyp_driver.c | 18 +
src/qemu/libvirtd_qemu.aug | 2 +
src/qemu/qemu.conf | 16 +
src/qemu/qemu_conf.c | 11 +
src/qemu/qemu_conf.h | 3 +
src/qemu/qemu_driver.c | 6 +
src/qemu/qemu_migration.c | 49 ++-
src/qemu/test_libvirtd_qemu.aug | 6 +
src/remote/remote_driver.c | 48 ++
src/remote/remote_protocol.x | 2 +-
src/rpc/virkeepalive.c | 464 ++++++++++++++++++++
src/rpc/virkeepalive.h | 58 +++
src/rpc/virkeepaliveprotocol.x | 8 +
src/rpc/virnetclient.c | 335 ++++++++++++--
src/rpc/virnetclient.h | 6 +
src/rpc/virnetserver.c | 10 +
src/rpc/virnetserver.h | 2 +
src/rpc/virnetserverclient.c | 126 +++++-
src/rpc/virnetserverclient.h | 6 +
src/test/test_driver.c | 6 +
src/uml/uml_driver.c | 7 +
src/vbox/vbox_tmpl.c | 6 +
src/vmware/vmware_driver.c | 7 +
src/xen/xen_driver.c | 8 +
src/xenapi/xenapi_driver.c | 12 +
44 files changed, 1483 insertions(+), 76 deletions(-)
create mode 100644 src/rpc/virkeepalive.c
create mode 100644 src/rpc/virkeepalive.h
create mode 100644 src/rpc/virkeepaliveprotocol.x
--
1.7.6.1
[libvirt] [PATCH 0/2] snapshot: add force for risky reverts
by Eric Blake
I first documented the need for force back in my RFC:
https://www.redhat.com/archives/libvir-list/2011-August/msg00361.html
but only now got around to implementing it.
At the moment, I'm posting the code for early review. I'm still
in the process of testing out multiple scenarios, and will send a
followup mail detailing the actual virsh steps I took to cover
them.
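For illustration, a forced revert from the API side would look roughly like
this (a sketch only, assuming the flag from patch 1/2 ends up named
VIR_DOMAIN_SNAPSHOT_REVERT_FORCE):
#include <libvirt/libvirt.h>
/* 'snap' is a snapshot whose revert is considered risky; without the flag
 * the revert is expected to fail and require force */
static int revert_risky(virDomainSnapshotPtr snap)
{
    return virDomainRevertToSnapshot(snap, VIR_DOMAIN_SNAPSHOT_REVERT_FORCE);
}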
Eric Blake (2):
snapshot: add REVERT_FORCE to API
snapshot: enforce REVERT_FORCE on qemu
include/libvirt/libvirt.h.in | 1 +
include/libvirt/virterror.h | 2 +
src/libvirt.c | 22 +++++++++++++++++++
src/qemu/qemu_driver.c | 47 +++++++++++++++++++++++++++++++++---------
src/util/virterror.c | 6 +++++
tools/virsh.c | 19 ++++++++++++++++-
tools/virsh.pod | 17 +++++++++++++++
7 files changed, 103 insertions(+), 11 deletions(-)
--
1.7.4.4
[libvirt] [RFC] security_dac: don't chown iso file
by Serge E. Hallyn
ISOs are read-only, so libvirt doesn't need to chown them. In one of
our testing setups, libvirt uses mirrored ISOs. Since libvirt chowns
the files (and especially does not chown them back), the mirror refuses
to update the ISO.
This patch prevents libvirt from chowning CDROM (ISO) files.
Does this seem reasonable?
Signed-off-by: Serge Hallyn <serge.hallyn(a)canonical.com>
---
src/security/security_dac.c | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/src/security/security_dac.c b/src/security/security_dac.c
index af02236..e7db324 100644
--- a/src/security/security_dac.c
+++ b/src/security/security_dac.c
@@ -555,6 +555,8 @@ virSecurityDACSetSecurityAllLabel(virSecurityManagerPtr mgr,
/* XXX fixme - we need to recursively label the entire tree :-( */
if (vm->def->disks[i]->type == VIR_DOMAIN_DISK_TYPE_DIR)
continue;
+ if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_CDROM)
+ continue;
if (virSecurityDACSetSecurityImageLabel(mgr,
vm,
vm->def->disks[i]) < 0)
--
1.7.5.4