[libvirt] [PATCH 0/7] Allow multiple console devices for each guest
by Daniel P. Berrange
The current XML schema only allows for a single <console> element
per guest. Many hypervisors support multiple paravirt consoles,
and it'd be desirable to support that. This series does just that,
enabling multiple consoles for UML, QEMU and LXC.
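As an illustrative sketch only (not taken from the patches themselves), a guest
definition with two paravirt consoles might end up looking roughly like this
once the series is applied:

  <devices>
    <!-- first paravirtualized console, guest-visible port 0 -->
    <console type='pty'>
      <target type='virtio' port='0'/>
    </console>
    <!-- an additional console on the next port -->
    <console type='pty'>
      <target type='virtio' port='1'/>
    </console>
  </devices>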
13 years, 1 month
[libvirt] [PATCH 1/2] Fix order of disks and controllers
by Jiri Denemark
Commit 2d6adabd53c8f1858191d521dc9b4948d1205955 replaced qsorting disk
and controller devices with inserting them at the right position. That
was to fix unnecessary reordering of devices. However, since then, when
parsing domain XML, devices are just taken in the order in which they
appear in the XML. Use the correct insertion algorithm to honor the
device target.
---
src/conf/domain_conf.c | 4 +-
.../qemuxml2argvdata/qemuxml2argv-boot-order.args | 35 ++++++++++++-------
tests/qemuxml2argvdata/qemuxml2argv-boot-order.xml | 14 ++++----
tests/xmconfigdata/test-escape-paths.cfg | 2 +-
tests/xmconfigdata/test-escape-paths.xml | 10 +++---
5 files changed, 37 insertions(+), 28 deletions(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index b406202..48ed7eb 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -5883,7 +5883,7 @@ static virDomainDefPtr virDomainDefParseXML(virCapsPtr caps,
if (!disk)
goto error;
- def->disks[def->ndisks++] = disk;
+ virDomainDiskInsertPreAlloced(def, disk);
}
VIR_FREE(nodes);
@@ -5899,7 +5899,7 @@ static virDomainDefPtr virDomainDefParseXML(virCapsPtr caps,
if (!controller)
goto error;
- def->controllers[def->ncontrollers++] = controller;
+ virDomainControllerInsertPreAlloced(def, controller);
}
VIR_FREE(nodes);
diff --git a/tests/qemuxml2argvdata/qemuxml2argv-boot-order.args b/tests/qemuxml2argvdata/qemuxml2argv-boot-order.args
index 23249f3..14367b1 100644
--- a/tests/qemuxml2argvdata/qemuxml2argv-boot-order.args
+++ b/tests/qemuxml2argvdata/qemuxml2argv-boot-order.args
@@ -1,13 +1,22 @@
-LC_ALL=C PATH=/bin HOME=/home/test USER=test LOGNAME=test /usr/bin/qemu -S -M \
-pc -m 214 -smp 1 -nographic -nodefaults -monitor unix:/tmp/test-monitor,server,\
-nowait -no-acpi -drive file=/dev/HostVG/QEMUGuest1,if=none,id=drive-ide0-0-0 \
--device ide-drive,bus=ide.0,unit=0,drive=drive-ide0-0-0,id=ide0-0-0 -drive \
-file=sheepdog:example.org:6000:image,if=none,id=drive-virtio-disk0 -device \
-virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,\
-bootindex=3 -drive file=/root/boot.iso,if=none,media=cdrom,id=drive-ide0-1-0 \
--device ide-drive,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0,\
-bootindex=1 -drive file=/dev/null,if=none,id=drive-fdc0-0-1 -global \
-isa-fdc.driveB=drive-fdc0-0-1 -global isa-fdc.bootindexB=4 -device \
-virtio-net-pci,vlan=0,id=net0,mac=00:11:22:33:44:55,bus=pci.0,addr=0x3,\
-bootindex=2 -net user,vlan=0,name=hostnet0 -usb -device virtio-balloon-pci,\
-id=balloon0,bus=pci.0,addr=0x5
+LC_ALL=C PATH=/bin HOME=/home/test USER=test LOGNAME=test /usr/bin/qemu \
+-S \
+-M pc \
+-m 214 \
+-smp 1 \
+-nographic \
+-nodefaults \
+-monitor unix:/tmp/test-monitor,server,nowait \
+-no-acpi \
+-drive file=/dev/HostVG/QEMUGuest1,if=none,id=drive-ide0-0-0 \
+-device ide-drive,bus=ide.0,unit=0,drive=drive-ide0-0-0,id=ide0-0-0 \
+-drive file=/root/boot.iso,if=none,media=cdrom,id=drive-ide0-1-0 \
+-device ide-drive,bus=ide.1,unit=0,drive=drive-ide0-1-0,id=ide0-1-0,bootindex=1 \
+-drive file=sheepdog:example.org:6000:image,if=none,id=drive-virtio-disk0 \
+-device virtio-blk-pci,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=3 \
+-drive file=/dev/null,if=none,id=drive-fdc0-0-1 \
+-global isa-fdc.driveB=drive-fdc0-0-1 \
+-global isa-fdc.bootindexB=4 \
+-device virtio-net-pci,vlan=0,id=net0,mac=00:11:22:33:44:55,bus=pci.0,addr=0x3,bootindex=2 \
+-net user,vlan=0,name=hostnet0 \
+-usb \
+-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5
diff --git a/tests/qemuxml2argvdata/qemuxml2argv-boot-order.xml b/tests/qemuxml2argvdata/qemuxml2argv-boot-order.xml
index ba8a9b2..0022c92 100644
--- a/tests/qemuxml2argvdata/qemuxml2argv-boot-order.xml
+++ b/tests/qemuxml2argvdata/qemuxml2argv-boot-order.xml
@@ -18,6 +18,13 @@
<target dev='hda' bus='ide'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>
+ <disk type='file' device='cdrom'>
+ <source file='/root/boot.iso'/>
+ <target dev='hdc' bus='ide'/>
+ <boot order='1'/>
+ <readonly/>
+ <address type='drive' controller='0' bus='1' unit='0'/>
+ </disk>
<disk type='network' device='disk'>
<driver name='qemu' type='raw'/>
<source protocol='sheepdog' name='image'>
@@ -26,13 +33,6 @@
<target dev='vda' bus='virtio'/>
<boot order='3'/>
</disk>
- <disk type='file' device='cdrom'>
- <source file='/root/boot.iso'/>
- <target dev='hdc' bus='ide'/>
- <boot order='1'/>
- <readonly/>
- <address type='drive' controller='0' bus='1' unit='0'/>
- </disk>
<disk type='file' device='floppy'>
<driver name='qemu' type='raw'/>
<source file='/dev/null'/>
diff --git a/tests/xmconfigdata/test-escape-paths.cfg b/tests/xmconfigdata/test-escape-paths.cfg
index e3e6db9..13be2a0 100644
--- a/tests/xmconfigdata/test-escape-paths.cfg
+++ b/tests/xmconfigdata/test-escape-paths.cfg
@@ -19,7 +19,7 @@ vnc = 1
vncunused = 1
vnclisten = "127.0.0.1"
vncpasswd = "123poi"
-disk = [ "phy:/dev/HostVG/XenGuest2,hda,w", "file:/root/boot.iso&test,hdc:cdrom,r", """phy:/dev/HostVG/XenGuest'",hdb,w""" ]
+disk = [ "phy:/dev/HostVG/XenGuest2,hda,w", """phy:/dev/HostVG/XenGuest'",hdb,w""", "file:/root/boot.iso&test,hdc:cdrom,r" ]
vif = [ "mac=00:16:3e:66:92:9c,bridge=xenbr1,script=vif-bridge,model=e1000,type=ioemu" ]
parallel = "none"
serial = "none"
diff --git a/tests/xmconfigdata/test-escape-paths.xml b/tests/xmconfigdata/test-escape-paths.xml
index 13e6e29..9eaf90c 100644
--- a/tests/xmconfigdata/test-escape-paths.xml
+++ b/tests/xmconfigdata/test-escape-paths.xml
@@ -25,17 +25,17 @@
<source dev='/dev/HostVG/XenGuest2'/>
<target dev='hda' bus='ide'/>
</disk>
+ <disk type='block' device='disk'>
+ <driver name='phy'/>
+ <source dev='/dev/HostVG/XenGuest'"'/>
+ <target dev='hdb' bus='ide'/>
+ </disk>
<disk type='file' device='cdrom'>
<driver name='file'/>
<source file='/root/boot.iso&test'/>
<target dev='hdc' bus='ide'/>
<readonly/>
</disk>
- <disk type='block' device='disk'>
- <driver name='phy'/>
- <source dev='/dev/HostVG/XenGuest'"'/>
- <target dev='hdb' bus='ide'/>
- </disk>
<interface type='bridge'>
<mac address='00:16:3e:66:92:9c'/>
<source bridge='xenbr1'/>
--
1.7.5.3
13 years, 1 month
[libvirt] Notes from the KVM Forum relevant to libvirt
by Daniel P. Berrange
I was at the KVM Forum / LinuxCon last week and there were many
interesting things discussed which are relevant to ongoing libvirt
development. Here is the list of items that caught my attention. If I have
missed any, please fill in the gaps....
- Sandbox/container KVM. The Solaris port of KVM puts QEMU inside
a zone so that an exploit of QEMU can't escape into the full OS.
Containers are Linux's parallel to Zones, and while they are not nearly
as secure yet, it would still be worth making more use of container
support to confine QEMU.
- Events for object changes. We already have async events for virDomainPtr.
We need the same for virInterfacePtr, virStoragePoolPtr, virStorageVolPtr
and virNodeDevPtr, so that at the very least applications can be notified
when objects are created or removed. For virNodeDevPtr we also want to
be notified when properties change (ie CDROM media change).
- CGroups passthrough. There is a lot of experimentation with cgroups. We
don't want to expose cgroups as a direct concept in the libvirt API,
but we should consider putting a generic cgroups get/set in the
libvirt-qemu.so library, or create a libvirt-linux.so library.
Also likely add a <linux:cgroups> XML element to store arbitrary
tunables in the XML. Same (low) level of support as with qemu:XXX,
of course.
- CPUSet for changing CPU + Memory NUMA pinning. The CPUset cgroups
controller is able to actually move a guest's memory between NUMA
nodes. We can already change VCPU pinning, but we need a new API
to do node pinning of the whole VM, so we can ensure the I/O threads
are also moved. We also need an API to move the memory pinning to
new nodes.
- Guest NUMA topology. If we have guests with RAM size > node size,
we need to expose a NUMA topology into the guest. The CPU/memory
pinning APIs will also need to be able to pin individual guest
NUMA nodes to individual host NUMA nodes.
- AHCI controller. IDE is going the way of the dodo. We need to add
support for QEMU's new AHCI controller. This is quite simple; we
already have a 'sata' disk type we can wire up to QEMU.
- VFIO PCI passthru. The current PCI assignment code may well be
changed to use something called 'VFIO'. This will need some
work in libvirt to support new CLI arg syntax, and probably
some SELinux work
- QCow3. There will soon be a QCow3 format. We need to add code to
detect it and extract backing stores, etc. Trivial since the primary
header format will still be the same as QCow2.
- QMP completion. Given Anthony's plan for a complete replacement of
the current CLI + monitor syntax in QEMU 2.0 (long way out), he has
dropped objections to adding new commands to QMP in the near future.
So all existing HMP commands will immediately be made available in
QMP with no attempt to re-design them now. So the need for the HMP
passthrough command will soon go away.
- Migration + VEPA/VNLink failures. As raised previously on this list,
Cisco really wants libvirt to have the ability to do migration, and
optionally *not* fail, even if the VEPA/VNLink setup fails. This will
require an event notification to the app if a failure of a device
backend occurs, and an API to let the admin app fix the device backend
(virDomainUpdateDevice) and some way to tell migration what bits are
allowed to fail.
- Virtio SCSI. We need to support this new stuff in QEMU when it is
eventually implemented. It will mean we avoid the PCI slot usage
problems inherent in virtio-blk, and get other things like multipath
and decent SCSI passthrough support.
- USB 2.0. We need to support this in libvirt asap. It is very important
for desktop experience and to support better integration with SPICE.
This also gets us proper USB port addressing. Fun footnote: QEMU USB
has *never* supported migration. The USB tablet only works by sheer
luck, as OSes see the device disappear on migration & come back with a
different device ID/port addr, and so they re-initialize it!
- Native KVM tool. The problem statement was that the QEMU code is too
big/complex & the command line args are too complex, so let's rewrite
from scratch to make the code small & CLI simple. They achieve this,
but of course primarily because they lack so many features compared
to QEMU. They had libvirt support as a bullet point in their presentation,
but I'm not expecting it to replace the current QEMU KVM support in
the foreseeable future, given its current level of features and the
size of its dev team compared to QEMU/KVM. They did have some fun
demos of booting using the host OS filesystem though. We can
actually do the same with regular KVM/libvirt but there's no nice
demo tool to show it off. I'm hoping to create one....
- Shared memory devices. Some people doing high performance work are
using the QEMU shared memory device. We don't support this (ivshmem
device) in libvirt yet. Fairly niche use cases, but might be nice to
have this.
- SDK / Docs. Request for a more SDK like approach to KVM development
tools and documentation. Also want to simplify libvirt operations.
The exposure of the virt-install internal API as official GObjects
would have significantly helped the project Ricardo (from IBM)
described in his presentation. Of course, no one can dispute that we
need more documentation in every area.
- USB managed mode. As we do with PCI passthrough, we should be able
to detach USB device from host OS, and perform a reset before
attaching to the guest, and most importantly track which USB devices
have been given to which guest, so we don't assign the same device twice.
We have all the necessary APIs; we just need to wire them up.
- PCI passthrough. We need to support setting of MAC addr, VLAN and
VEPA/VNLink properties against VFs from SRIOV NICs that are assigned
to a guest.
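As a point of reference for the "events for object changes" item above, here is
a minimal, untested sketch of how the existing virDomainPtr async event
registration is used today; the new object types would presumably follow the
same pattern (the names below are from the current public API, the rest is
illustrative):

  #include <stdio.h>
  #include <libvirt/libvirt.h>

  /* Lifecycle callback: invoked when a domain is defined, started,
   * stopped, etc. */
  static int
  lifecycleCb(virConnectPtr conn, virDomainPtr dom,
              int event, int detail, void *opaque)
  {
      printf("domain %s: lifecycle event %d (detail %d)\n",
             virDomainGetName(dom), event, detail);
      return 0;
  }

  int main(void)
  {
      virEventRegisterDefaultImpl();          /* built-in event loop */
      virConnectPtr conn = virConnectOpen("qemu:///system");
      if (!conn)
          return 1;
      virConnectDomainEventRegisterAny(conn, NULL /* all domains */,
                                       VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                                       VIR_DOMAIN_EVENT_CALLBACK(lifecycleCb),
                                       NULL, NULL);
      while (virEventRunDefaultImpl() == 0)   /* dispatch events */
          ;
      virConnectClose(conn);
      return 0;
  }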
For those who were not at the KVM Forum, the presentations are already
available online at:
http://www.linux-kvm.org/page/KVM_Forum_2011
All the sessions were also video recorded, so sometime in the next week
or two, there should be OGG videos of the talks being uploaded to the
same site.
Regards,
Daniel
--
|: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org -o- http://virt-manager.org :|
|: http://autobuild.org -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|
13 years, 1 month
[libvirt] [PATCH 1/2] docs: improve typed parameter documentation
by Eric Blake
virDomainBlockStatsFlags was missing a check that was present in
virDomainGetMemoryParameters. Additionally, I found that the
existing descriptions were a bit hard to read. A later patch
will fix qemu to return fewer than the maximum number of parameters if
@nparams was too small on input, rather than failing outright.
* src/libvirt.c (virDomainGetMemoryParameters)
(virDomainGetBlkioParameters, virDomainGetSchedulerParameters)
(virDomainGetSchedulerParametersFlags):
Tweak documentation wording.
(virDomainBlockStatsFlags): Likewise, and add sanity check.
---
src/libvirt.c | 100 +++++++++++++++++++++++++++++++++++++--------------------
1 files changed, 65 insertions(+), 35 deletions(-)
diff --git a/src/libvirt.c b/src/libvirt.c
index e9d1a29..f9cddef 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -3639,15 +3639,17 @@ error:
* @domain: pointer to domain object
* @params: pointer to memory parameter object
* (return value, allocated by the caller)
- * @nparams: pointer to number of memory parameters
+ * @nparams: pointer to number of memory parameters; input and output
* @flags: one of virDomainModificationImpact
*
- * Get all memory parameters, the @params array will be filled with the values
- * equal to the number of parameters suggested by @nparams
+ * Get all memory parameters. On input, @nparams gives the size of the
+ * @params array; on output, @nparams gives how many slots were filled
+ * with parameter information, which might be less but will not exceed
+ * the input value.
*
- * As the value of @nparams is dynamic, call the API setting @nparams to 0 and
- * @params as NULL, the API returns the number of parameters supported by the
- * HV by updating @nparams on SUCCESS. The caller should then allocate @params
+ * As a special case, calling with @params as NULL and @nparams as 0 on
+ * input will cause @nparams on output to contain the number of parameters
+ * supported by the hypervisor. The caller should then allocate @params
* array, i.e. (sizeof(@virTypedParameter) * @nparams) bytes and call the API
* again.
*
@@ -3765,12 +3767,21 @@ error:
* @domain: pointer to domain object
* @params: pointer to blkio parameter object
* (return value, allocated by the caller)
- * @nparams: pointer to number of blkio parameters
+ * @nparams: pointer to number of blkio parameters; input and output
* @flags: an OR'ed set of virDomainModificationImpact
*
- * Get all blkio parameters, the @params array will be filled with the values
- * equal to the number of parameters suggested by @nparams.
- * See virDomainGetMemoryParameters for an equivalent usage example.
+ * Get all blkio parameters. On input, @nparams gives the size of the
+ * @params array; on output, @nparams gives how many slots were filled
+ * with parameter information, which might be less but will not exceed
+ * the input value.
+ *
+ * As a special case, calling with @params as NULL and @nparams as 0 on
+ * input will cause @nparams on output to contain the number of parameters
+ * supported by the hypervisor. The caller should then allocate @params
+ * array, i.e. (sizeof(@virTypedParameter) * @nparams) bytes and call the API
+ * again.
+ *
+ * See virDomainGetMemoryParameters() for an equivalent usage example.
*
* This function may require privileged access to the hypervisor. This function
* expects the caller to allocate the @params.
@@ -6335,14 +6346,17 @@ error:
* @params: pointer to scheduler parameter objects
* (return value)
* @nparams: pointer to number of scheduler parameter objects
- * (this value must be at least as large as the returned value
- * nparams of virDomainGetSchedulerType)
+ * (this value should generally be as large as the returned value
+ * nparams of virDomainGetSchedulerType()); input and output
+ *
+ * Get all scheduler parameters. On input, @nparams gives the size of the
+ * @params array; on output, @nparams gives how many slots were filled
+ * with parameter information, which might be less but will not exceed
+ * the input value. @nparams cannot be 0.
*
- * Get all scheduler parameters, the @params array will be filled with the
- * values and @nparams will be updated to the number of valid elements in
- * @params. It is hypervisor specific whether this returns the live or
+ * It is hypervisor specific whether this returns the live or
* persistent state; for more control, use
- * virDomainGetSchedulerParametersFlags.
+ * virDomainGetSchedulerParametersFlags().
*
* Returns -1 in case of error, 0 in case of success.
*/
@@ -6391,15 +6405,28 @@ error:
* (return value)
* @nparams: pointer to number of scheduler parameter
* (this value should be same than the returned value
- * nparams of virDomainGetSchedulerType)
+ * nparams of virDomainGetSchedulerType()); input and output
* @flags: one of virDomainModificationImpact
*
- * Get the scheduler parameters, the @params array will be filled with the
- * values.
+ * Get all scheduler parameters. On input, @nparams gives the size of the
+ * @params array; on output, @nparams gives how many slots were filled
+ * with parameter information, which might be less but will not exceed
+ * the input value. @nparams cannot be 0.
*
* The value of @flags can be exactly VIR_DOMAIN_AFFECT_CURRENT,
* VIR_DOMAIN_AFFECT_LIVE, or VIR_DOMAIN_AFFECT_CONFIG.
*
+ * Here is a sample code snippet:
+ *
+ * char *ret = virDomainGetSchedulerType(dom, &nparams);
+ * if (ret && nparams != 0) {
+ * if ((params = malloc(sizeof(*params) * nparams)) == NULL)
+ * goto error;
+ * memset(params, 0, sizeof(*params) * nparams);
+ * if (virDomainGetSchedulerParametersFlags(dom, params, &nparams, 0))
+ * goto error;
+ * }
+ *
* Returns -1 in case of error, 0 in case of success.
*/
int
@@ -6633,8 +6660,8 @@ error:
* @path: path to the block device
* @params: pointer to block stats parameter object
* (return value)
- * @nparams: pointer to number of block stats
- * @flags: unused, always passes 0
+ * @nparams: pointer to number of block stats; input and output
+ * @flags: unused, always pass 0
*
* This function is to get block stats parameters for block
* devices attached to the domain.
@@ -6646,24 +6673,26 @@ error:
* Domains may have more than one block device. To get stats for
* each you should make multiple calls to this function.
*
- * The @params array will be filled with the value equal to the number of
- * parameters suggested by @nparams.
+ * On input, @nparams gives the size of the @params array; on output,
+ * @nparams gives how many slots were filled with parameter
+ * information, which might be less but will not exceed the input
+ * value.
*
- * As the value of @nparams is dynamic, call the API setting @nparams to 0 and
- * @params as NULL, the API returns the number of parameters supported by the
- * HV by updating @nparams on SUCCESS. (Note that block device of different type
- * might support different parameters numbers, so it might be necessary to compute
- * @nparams for each block device type). The caller should then allocate @params
+ * As a special case, calling with @params as NULL and @nparams as 0 on
+ * input will cause @nparams on output to contain the number of parameters
+ * supported by the hypervisor. (Note that block devices of different types
+ * might support different parameters, so it might be necessary to compute
+ * @nparams for each block device). The caller should then allocate @params
* array, i.e. (sizeof(@virTypedParameter) * @nparams) bytes and call the API
- * again. See virDomainGetMemoryParameters for more details.
+ * again. See virDomainGetMemoryParameters() for more details.
*
* Returns -1 in case of error, 0 in case of success.
*/
-int virDomainBlockStatsFlags (virDomainPtr dom,
- const char *path,
- virTypedParameterPtr params,
- int *nparams,
- unsigned int flags)
+int virDomainBlockStatsFlags(virDomainPtr dom,
+ const char *path,
+ virTypedParameterPtr params,
+ int *nparams,
+ unsigned int flags)
{
virConnectPtr conn;
@@ -6677,7 +6706,8 @@ int virDomainBlockStatsFlags (virDomainPtr dom,
virDispatchError(NULL);
return -1;
}
- if (!path || (nparams == NULL) || (*nparams < 0)) {
+ if (!path || (nparams == NULL) || (*nparams < 0) ||
+ (params == NULL && *nparams != 0)) {
virLibConnError(VIR_ERR_INVALID_ARG, __FUNCTION__);
goto error;
}
--
1.7.4.4
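For readers following the thread, a minimal sketch of the two-step query
pattern these doc comments describe (illustrative only, error handling
trimmed; assumes an open virDomainPtr dom):

  virTypedParameterPtr params = NULL;
  int nparams = 0;

  /* First call with params == NULL and nparams == 0: learn how many
   * parameters the hypervisor supports. */
  if (virDomainGetMemoryParameters(dom, NULL, &nparams, 0) == 0 && nparams) {
      params = calloc(nparams, sizeof(*params));
      /* Second call: fills up to nparams slots and updates nparams to the
       * number actually returned, which may be smaller. */
      if (virDomainGetMemoryParameters(dom, params, &nparams, 0) < 0) {
          /* handle error */
      }
  }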
13 years, 1 month
[libvirt] RFC decoupling VM NIC provisioning from VM NIC connection to backend networks
by Sumit Naiksatam (snaiksat)
Hi,
In its current implementation Libvirt makes sure that the network
interfaces that it passes/provisions to a VM (for example to qemu[-kvm])
are already connected to their backends (interfaces/networks) by the time
the VM starts its boot process. In a non virtualized setup it would be
like booting a machine with the Ethernet cable already plugged into a
router/switch port. While in a non virtualized setup you can boot a
machine first (with no physical connection to a router/switch) and later
connect its NIC/s to the switch/router, when you boot a VM via Libvirt
it is not possible to decouple the two actions (VM boot, cable
plug/unplug).
An example of a case where the capability to decouple the two actions
mentioned above is a requirement is Quantum/NetStack, the network
service leveraged by OpenStack. The modular design of OpenStack
allows you to:
- provision VMs with NIC/s
- create networks
- create ports on networks
- plug/unplug a VM NIC into/from a given port on a network (at runtime)
Note that this runtime plug/unplug requirement has nothing to do with
hot plug/unplug of NICs.
The idea is more that of decoupling the provisioning of a VM from the
connection of the VM to the network/s.
This would make it possible to change (at run-time too) the networks the
NIC/s of a given VM are connected to.
For example, when a VM boots, its interfaces should be in link down
state if the network admin has not connected the VM NIC/s to any
"network" yet.
Even though libvirt already provides a way to change the link state of
a VM NIC, link state and physical connection are two different things
and should be manageable independently.
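For reference, the existing link-state knob mentioned above is set in the
domain XML roughly as follows (a sketch of current behaviour, not the proposed
plug/unplug syntax):

  <interface type='network'>
    <source network='default'/>
    <model type='virtio'/>
    <link state='down'/>  <!-- guest sees link down, yet the backend stays attached -->
  </interface>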
Ideally the configuration syntax should be interface type and hypervisor
type agnostic.
Let's take QEMU[-kvm] as an example - when Libvirt starts a QEMU VM, it
passes to QEMU a number of file descriptors that map to host backend
interfaces (for example macvtap interfaces).
In order to introduce this runtime plug/unplug capability, we need a
mechanism that permits delaying the binding between the host macvtap
interfaces and the guest taps (because you cannot know the fd of the
macvtap interfaces before you create them). This means you need a
mechanism that allows you to change such fd/s at runtime:
- you can close/reset an fd (ie, when you disconnect a VM NIC from its
network)
- you can open/set an fd (ie, when you connect a VM NIC to a network)
This could probably be a libvirt command that translates to a QEMU
monitor command.
Can the runtime plug/unplug capability described above be achieved
(cleanly) with another mechanism?
Is anybody working on implementing something similar?
[For more information on OpenStack/NetStack/Quantum and the above
requirements please refer to the network model used therein:
http://docs.openstack.org/incubation/openstack-network/admin/content/WhatIsQuantum.html
(information on network, port, and attachment
abstractions)
http://www.slideshare.net/danwent/quantum-diablo-summary (slides 7 & 8)]
Thanks,
~Sumit Naiksatam.
(On behalf of OpenStack/Quantum team)
13 years, 1 month
[libvirt] [PATCH] esx: Support vSphere 5.x
by Patrice LACHANCE
Hello
Based on http://www.redhat.com/archives/libvir-list/2010-July/msg00480.html,
I created this quick patch to accept connection to vSphere 5.x
Notes:
- I had to remove the warnings for version > 4.1 & 5 because they were
generating errors when running scripts encapsulating virsh commands with
opennebula. A better option might be to rely on an environment variable
(for example DONT_BLAME_LIBVIRT!) to toggle warnings on/off
- Ran only a few tests but it works as expected.
- Next steps should be to work on VMware API 5.0 integration
Patrice
diff --git a/src/esx/esx_driver.c b/src/esx/esx_driver.c
index 41086ef..78872d4 100644
--- a/src/esx/esx_driver.c
+++ b/src/esx/esx_driver.c
@@ -730,9 +730,11 @@ esxConnectToHost(esxPrivate *priv, virConnectAuthPtr auth,
if (priv->host->productVersion != esxVI_ProductVersion_ESX35 &&
priv->host->productVersion != esxVI_ProductVersion_ESX40 &&
priv->host->productVersion != esxVI_ProductVersion_ESX41 &&
- priv->host->productVersion != esxVI_ProductVersion_ESX4x) {
+ priv->host->productVersion != esxVI_ProductVersion_ESX4x &&
+ priv->host->productVersion != esxVI_ProductVersion_ESX50 &&
+ priv->host->productVersion != esxVI_ProductVersion_ESX5x) {
ESX_ERROR(VIR_ERR_INTERNAL_ERROR,
-                  _("%s is neither an ESX 3.5 host nor an ESX 4.x host"),
+ _("%s is neither an ESX 3.5, 4.x nor 5.x host"),
hostname);
goto cleanup;
}
@@ -857,10 +859,11 @@ esxConnectToVCenter(esxPrivate *priv, virConnectAuthPtr auth,
if (priv->vCenter->productVersion != esxVI_ProductVersion_VPX25 &&
priv->vCenter->productVersion != esxVI_ProductVersion_VPX40 &&
priv->vCenter->productVersion != esxVI_ProductVersion_VPX41 &&
- priv->vCenter->productVersion != esxVI_ProductVersion_VPX4x) {
+ priv->vCenter->productVersion != esxVI_ProductVersion_VPX4x &&
+ priv->vCenter->productVersion != esxVI_ProductVersion_VPX50 &&
+ priv->vCenter->productVersion != esxVI_ProductVersion_VPX5x) {
ESX_ERROR(VIR_ERR_INTERNAL_ERROR,
- _("%s is neither a vCenter 2.5 server nor a vCenter "
- "4.x server"), hostname);
+                  _("%s is neither a vCenter 2.5, 4.x nor 5.x server"), hostname);
goto cleanup;
}
diff --git a/src/esx/esx_vi.c b/src/esx/esx_vi.c
index 325157c..3a11116 100644
--- a/src/esx/esx_vi.c
+++ b/src/esx/esx_vi.c
@@ -675,9 +675,23 @@ esxVI_Context_Connect(esxVI_Context *ctx, const char *url,
VIR_WARN("Found untested VI API major/minor version '%s'",
ctx->service->about->apiVersion);
+ } else if (STRPREFIX(ctx->service->about->apiVersion, "5.0")) {
+ ctx->apiVersion = esxVI_APIVersion_50;
+
+ /*
+ VIR_WARN("Found untested VI API major/minor version '%s'",
+ ctx->service->about->apiVersion);
+ */
+ } else if (STRPREFIX(ctx->service->about->apiVersion, "5.")) {
+ ctx->apiVersion = esxVI_APIVersion_5x;
+
+ /*
+ VIR_WARN("Found untested VI API major/minor version '%s'",
+ ctx->service->about->apiVersion);
+ */
} else {
ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR,
-                     _("Expecting VI API major/minor version '2.5' or '4.x' "
+                     _("Expecting VI API major/minor version '2.5', '4.x' or '5.x' "
"but found '%s'"),
ctx->service->about->apiVersion);
return -1;
}
@@ -704,10 +718,24 @@ esxVI_Context_Connect(esxVI_Context *ctx, const char *url,
VIR_WARN("Found untested ESX major/minor version '%s'",
ctx->service->about->version);
+ } else if (STRPREFIX(ctx->service->about->version, "5.0")) {
+ ctx->productVersion = esxVI_ProductVersion_ESX50;
+
+ /*
+ VIR_WARN("Found untested ESX major/minor version '%s'",
+ ctx->service->about->version);
+ */
+ } else if (STRPREFIX(ctx->service->about->version, "5.")) {
+ ctx->productVersion = esxVI_ProductVersion_ESX5x;
+
+ /*
+ VIR_WARN("Found untested ESX major/minor version '%s'",
+ ctx->service->about->version);
+ */
} else {
ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR,
-                     _("Expecting ESX major/minor version '3.5' or "
-                      "'4.x' but found '%s'"),
+                     _("Expecting ESX major/minor version '3.5', '4.x' or "
+ "'5.x' but found '%s'"),
ctx->service->about->version);
return -1;
}
@@ -723,9 +751,23 @@ esxVI_Context_Connect(esxVI_Context *ctx, const char *url,
VIR_WARN("Found untested VPX major/minor version '%s'",
ctx->service->about->version);
+ } else if (STRPREFIX(ctx->service->about->version, "5.0")) {
+ ctx->productVersion = esxVI_ProductVersion_VPX50;
+
+ /*
+ VIR_WARN("Found untested VPX major/minor version '%s'",
+ ctx->service->about->version);
+ */
+ } else if (STRPREFIX(ctx->service->about->version, "5.")) {
+ ctx->productVersion = esxVI_ProductVersion_VPX5x;
+
+ /*
+ VIR_WARN("Found untested VPX major/minor version '%s'",
+ ctx->service->about->version);
+ */
} else {
ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR,
-                     _("Expecting VPX major/minor version '2.5' or '4.x' "
+                     _("Expecting VPX major/minor version '2.5', '4.x' or '5.x' "
"but found '%s'"),
ctx->service->about->version);
return -1;
}
@@ -3919,6 +3961,12 @@ esxVI_ProductVersionToDefaultVirtualHWVersion(esxVI_ProductVersion productVersio
case esxVI_ProductVersion_VPX4x:
return 7;
+ case esxVI_ProductVersion_ESX50:
+ case esxVI_ProductVersion_VPX50:
+ case esxVI_ProductVersion_ESX5x:
+ case esxVI_ProductVersion_VPX5x:
+ return 8;
+
default:
ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR, "%s",
_("Unexpected product version"));
diff --git a/src/esx/esx_vi.h b/src/esx/esx_vi.h
index b8e921f..fbf3fb2 100644
--- a/src/esx/esx_vi.h
+++ b/src/esx/esx_vi.h
@@ -99,7 +99,9 @@ enum _esxVI_APIVersion {
esxVI_APIVersion_25,
esxVI_APIVersion_40,
esxVI_APIVersion_41,
- esxVI_APIVersion_4x /* > 4.1 */
+ esxVI_APIVersion_4x,
+ esxVI_APIVersion_50,
+ esxVI_APIVersion_5x /* > 5.0 */
};
/*
@@ -116,13 +118,17 @@ enum _esxVI_ProductVersion {
esxVI_ProductVersion_ESX35 = esxVI_ProductVersion_ESX | 1,
esxVI_ProductVersion_ESX40 = esxVI_ProductVersion_ESX | 2,
esxVI_ProductVersion_ESX41 = esxVI_ProductVersion_ESX | 3,
- esxVI_ProductVersion_ESX4x = esxVI_ProductVersion_ESX | 4, /* > 4.1 */
+ esxVI_ProductVersion_ESX4x = esxVI_ProductVersion_ESX | 4,
+ esxVI_ProductVersion_ESX50 = esxVI_ProductVersion_ESX | 5,
+ esxVI_ProductVersion_ESX5x = esxVI_ProductVersion_ESX | 6, /* > 5.0 */
esxVI_ProductVersion_VPX = (1 << 2) << 16,
esxVI_ProductVersion_VPX25 = esxVI_ProductVersion_VPX | 1,
esxVI_ProductVersion_VPX40 = esxVI_ProductVersion_VPX | 2,
esxVI_ProductVersion_VPX41 = esxVI_ProductVersion_VPX | 3,
- esxVI_ProductVersion_VPX4x = esxVI_ProductVersion_VPX | 4 /* > 4.1 */
+ esxVI_ProductVersion_VPX4x = esxVI_ProductVersion_VPX | 4,
+ esxVI_ProductVersion_VPX50 = esxVI_ProductVersion_VPX | 5,
+ esxVI_ProductVersion_VPX5x = esxVI_ProductVersion_VPX | 6 /* > 5.0 */
};
enum _esxVI_Occurrence {
13 years, 1 month
[libvirt] [PATCH] Fix storage pool source comparison to avoid comparing with self
by Daniel P. Berrange
From: "Daniel P. Berrange" <berrange(a)redhat.com>
If we are comparing storage pools we must skip comparing a pool with
itself, so that re-defining an existing pool works.
* conf/storage_conf.c: Skip self when comparing
---
src/conf/storage_conf.c | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
index e893b2d..eb39198 100644
--- a/src/conf/storage_conf.c
+++ b/src/conf/storage_conf.c
@@ -1730,6 +1730,10 @@ int virStoragePoolSourceFindDuplicate(virStoragePoolObjListPtr pools,
if (def->type != pool->def->type)
continue;
+    /* Don't match against ourself if re-defining existing pool! */
+ if (STREQ(pool->def->name, def->name))
+ continue;
+
virStoragePoolObjLock(pool);
switch (pool->def->type) {
--
1.7.6.4
13 years, 1 month
[libvirt] [PATCH] qemu: plug memory leak
by ajia@redhat.com
From: Alex Jia <ajia(a)redhat.com>
Detected by Coverity. Leak introduced in commit 6cabc0b.
* src/qemu/qemu_command.c: Clean up on failure.
Signed-off-by: Alex Jia <ajia(a)redhat.com>
---
src/qemu/qemu_command.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 0936492..02958cb 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -4995,6 +4995,7 @@ qemuBuildCommandLine(virConnectPtr conn,
}
if (sound->model == VIR_DOMAIN_SOUND_MODEL_ICH6) {
+ VIR_FREE(modstr);
qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("this QEMU binary lacks hda support"));
goto error;
--
1.7.1
13 years, 1 month