[libvirt] [PATCH v2] storage: zfs: implement pool build and delete
by Roman Bogorodskiy
- Provide an implementation for buildPool and deletePool operations
for the ZFS storage backend.
- Add VIR_STORAGE_POOL_SOURCE_DEVICE flag to ZFS pool poolOptions
as now we can specify devices to build pool from
- storagepool.rng: add an optional 'sourceinfodev' to 'sourcezfs' and
add an optional 'target' to 'poolzfs' entity
- Add a couple of tests to storagepoolxml2xmltest
---
docs/schemas/storagepool.rng | 6 +++
src/conf/storage_conf.c | 3 +-
src/storage/storage_backend_zfs.c | 57 ++++++++++++++++++++++
tests/storagepoolxml2xmlin/pool-zfs-sourcedev.xml | 8 +++
tests/storagepoolxml2xmlin/pool-zfs.xml | 7 +++
tests/storagepoolxml2xmlout/pool-zfs-sourcedev.xml | 19 ++++++++
tests/storagepoolxml2xmlout/pool-zfs.xml | 18 +++++++
tests/storagepoolxml2xmltest.c | 2 +
8 files changed, 119 insertions(+), 1 deletion(-)
create mode 100644 tests/storagepoolxml2xmlin/pool-zfs-sourcedev.xml
create mode 100644 tests/storagepoolxml2xmlin/pool-zfs.xml
create mode 100644 tests/storagepoolxml2xmlout/pool-zfs-sourcedev.xml
create mode 100644 tests/storagepoolxml2xmlout/pool-zfs.xml
diff --git a/docs/schemas/storagepool.rng b/docs/schemas/storagepool.rng
index 908cc11..2d165a3 100644
--- a/docs/schemas/storagepool.rng
+++ b/docs/schemas/storagepool.rng
@@ -166,6 +166,9 @@
<ref name='commonmetadata'/>
<ref name='sizing'/>
<ref name='sourcezfs'/>
+ <optional>
+ <ref name='target'/>
+ </optional>
</interleave>
</define>
@@ -386,6 +389,9 @@
<element name='source'>
<interleave>
<ref name='sourceinfoname'/>
+ <optional>
+ <ref name='sourceinfodev'/>
+ </optional>
</interleave>
</element>
</define>
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
index d42cde7..36696a4 100644
--- a/src/conf/storage_conf.c
+++ b/src/conf/storage_conf.c
@@ -282,7 +282,8 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
},
{.poolType = VIR_STORAGE_POOL_ZFS,
.poolOptions = {
- .flags = (VIR_STORAGE_POOL_SOURCE_NAME),
+ .flags = (VIR_STORAGE_POOL_SOURCE_NAME |
+ VIR_STORAGE_POOL_SOURCE_DEVICE),
.defaultFormat = VIR_STORAGE_FILE_RAW,
},
},
diff --git a/src/storage/storage_backend_zfs.c b/src/storage/storage_backend_zfs.c
index d8201ac..9482706 100644
--- a/src/storage/storage_backend_zfs.c
+++ b/src/storage/storage_backend_zfs.c
@@ -325,6 +325,61 @@ virStorageBackendZFSDeleteVol(virConnectPtr conn ATTRIBUTE_UNUSED,
return ret;
}
+static int
+virStorageBackendZFSBuildPool(virConnectPtr conn ATTRIBUTE_UNUSED,
+ virStoragePoolObjPtr pool,
+ unsigned int flags)
+{
+ virCommandPtr cmd = NULL;
+ size_t i;
+ int ret = -1;
+
+ virCheckFlags(0, -1);
+
+ if (pool->def->source.ndevice == 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ "%s", _("missing source devices"));
+ return -1;
+ }
+
+ cmd = virCommandNewArgList(ZPOOL, "create",
+ pool->def->source.name, NULL);
+
+ for (i = 0; i < pool->def->source.ndevice; i++)
+ virCommandAddArg(cmd, pool->def->source.devices[i].path);
+
+ if (virCommandRun(cmd, NULL) < 0)
+ goto cleanup;
+
+ ret = 0;
+
+ cleanup:
+ virCommandFree(cmd);
+ return ret;
+}
+
+static int
+virStorageBackendZFSDeletePool(virConnectPtr conn ATTRIBUTE_UNUSED,
+ virStoragePoolObjPtr pool,
+ unsigned int flags)
+{
+ virCommandPtr cmd = NULL;
+ int ret = -1;
+
+ virCheckFlags(0, -1);
+
+ cmd = virCommandNewArgList(ZPOOL, "destroy",
+ pool->def->source.name, NULL);
+
+ if (virCommandRun(cmd, NULL) < 0)
+ goto cleanup;
+
+ ret = 0;
+
+ cleanup:
+ virCommandFree(cmd);
+ return ret;
+}
virStorageBackend virStorageBackendZFS = {
.type = VIR_STORAGE_POOL_ZFS,
@@ -333,6 +388,8 @@ virStorageBackend virStorageBackendZFS = {
.refreshPool = virStorageBackendZFSRefreshPool,
.createVol = virStorageBackendZFSCreateVol,
.deleteVol = virStorageBackendZFSDeleteVol,
+ .buildPool = virStorageBackendZFSBuildPool,
+ .deletePool = virStorageBackendZFSDeletePool,
.uploadVol = virStorageBackendVolUploadLocal,
.downloadVol = virStorageBackendVolDownloadLocal,
};
diff --git a/tests/storagepoolxml2xmlin/pool-zfs-sourcedev.xml b/tests/storagepoolxml2xmlin/pool-zfs-sourcedev.xml
new file mode 100644
index 0000000..b0e0a96
--- /dev/null
+++ b/tests/storagepoolxml2xmlin/pool-zfs-sourcedev.xml
@@ -0,0 +1,8 @@
+<pool type="zfs">
+ <name>zfs</name>
+ <uuid>429126d2-f4bb-45b0-b336-2e81dc6d241c</uuid>
+ <source>
+ <name>testpool</name>
+ <device path="/dev/ada1"/>
+ </source>
+</pool>
diff --git a/tests/storagepoolxml2xmlin/pool-zfs.xml b/tests/storagepoolxml2xmlin/pool-zfs.xml
new file mode 100644
index 0000000..813342f
--- /dev/null
+++ b/tests/storagepoolxml2xmlin/pool-zfs.xml
@@ -0,0 +1,7 @@
+<pool type="zfs">
+ <name>zfs</name>
+ <uuid>024835f8-52b5-4226-b2b4-8c0d3afa5b2f</uuid>
+ <source>
+ <name>testpool</name>
+ </source>
+</pool>
diff --git a/tests/storagepoolxml2xmlout/pool-zfs-sourcedev.xml b/tests/storagepoolxml2xmlout/pool-zfs-sourcedev.xml
new file mode 100644
index 0000000..bbd2e9f
--- /dev/null
+++ b/tests/storagepoolxml2xmlout/pool-zfs-sourcedev.xml
@@ -0,0 +1,19 @@
+<pool type='zfs'>
+ <name>zfs</name>
+ <uuid>429126d2-f4bb-45b0-b336-2e81dc6d241c</uuid>
+ <capacity unit='bytes'>0</capacity>
+ <allocation unit='bytes'>0</allocation>
+ <available unit='bytes'>0</available>
+ <source>
+ <device path='/dev/ada1'/>
+ <name>testpool</name>
+ </source>
+ <target>
+ <path>/dev/zvol/testpool</path>
+ <permissions>
+ <mode>0755</mode>
+ <owner>-1</owner>
+ <group>-1</group>
+ </permissions>
+ </target>
+</pool>
diff --git a/tests/storagepoolxml2xmlout/pool-zfs.xml b/tests/storagepoolxml2xmlout/pool-zfs.xml
new file mode 100644
index 0000000..ff02329
--- /dev/null
+++ b/tests/storagepoolxml2xmlout/pool-zfs.xml
@@ -0,0 +1,18 @@
+<pool type='zfs'>
+ <name>zfs</name>
+ <uuid>024835f8-52b5-4226-b2b4-8c0d3afa5b2f</uuid>
+ <capacity unit='bytes'>0</capacity>
+ <allocation unit='bytes'>0</allocation>
+ <available unit='bytes'>0</available>
+ <source>
+ <name>testpool</name>
+ </source>
+ <target>
+ <path>/dev/zvol/testpool</path>
+ <permissions>
+ <mode>0755</mode>
+ <owner>-1</owner>
+ <group>-1</group>
+ </permissions>
+ </target>
+</pool>
diff --git a/tests/storagepoolxml2xmltest.c b/tests/storagepoolxml2xmltest.c
index d7ae10b..8a2c0b5 100644
--- a/tests/storagepoolxml2xmltest.c
+++ b/tests/storagepoolxml2xmltest.c
@@ -105,6 +105,8 @@ mymain(void)
DO_TEST("pool-gluster");
DO_TEST("pool-gluster-sub");
DO_TEST("pool-scsi-type-scsi-host-stable");
+ DO_TEST("pool-zfs");
+ DO_TEST("pool-zfs-sourcedev");
return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
--
2.0.2
10 years, 1 month
[libvirt] [PATCH 0/3] maint: clean up of struct XXX
by James
I found that some struct XXX declarations can be converted to typedefs,
and I have cleaned them up.
James (3):
maint: clean up _virDomainInterfaceStats
maint: clean up _virDomainBlockStats
maint: clean up _virDomainMemoryStat
daemon/remote.c | 2 +-
src/driver.h | 6 +++---
src/libvirt.c | 6 +++---
src/lxc/lxc_driver.c | 8 ++++----
src/openvz/openvz_driver.c | 2 +-
src/qemu/qemu_driver.c | 6 +++---
src/qemu/qemu_monitor_text.c | 2 +-
src/remote/remote_driver.c | 2 +-
src/test/test_driver.c | 4 ++--
src/util/virstats.c | 6 +++---
src/util/virstats.h | 2 +-
src/xen/block_stats.c | 4 ++--
src/xen/block_stats.h | 2 +-
src/xen/xen_driver.c | 4 ++--
src/xen/xen_hypervisor.c | 4 ++--
src/xen/xen_hypervisor.h | 4 ++--
tools/virsh-domain-monitor.c | 6 +++---
17 files changed, 35 insertions(+), 35 deletions(-)
--
1.7.12.4
10 years, 1 month
[libvirt] [PATCH] Fix libvirtd crash when removing metadata
by Erik Skultety
When trying to remove nonexistent metadata from the XML, the libvirt
daemon crashes due to dereferencing a NULL pointer.
Resolves https://bugzilla.redhat.com/show_bug.cgi?id=1143955
---
src/util/virxml.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/util/virxml.c b/src/util/virxml.c
index a91da05..f386956 100644
--- a/src/util/virxml.c
+++ b/src/util/virxml.c
@@ -972,7 +972,9 @@ xmlNodePtr
virXMLFindChildNodeByNs(xmlNodePtr root,
const char *uri)
{
- xmlNodePtr next;
+ xmlNodePtr next = NULL;
+ if (!root)
+ goto cleanup;
for (next = root->children; next; next = next->next) {
if (next->ns &&
@@ -980,6 +982,7 @@ virXMLFindChildNodeByNs(xmlNodePtr root,
return next;
}
+ cleanup:
return NULL;
}
--
1.9.3
10 years, 1 month
[libvirt] [PATCH 0/4] Memory leak fixes
by Ján Tomko
Ján Tomko (4):
Fix leak in x86UpdateHostModel
Fixes for domains with no iothreads
audit: remove redundant NULL assignment
audit: fix memory leak without WITH_AUDIT
src/cpu/cpu_x86.c | 4 +++-
src/qemu/qemu_cgroup.c | 2 +-
src/qemu/qemu_process.c | 6 ++++--
src/util/viraudit.c | 13 +++----------
4 files changed, 11 insertions(+), 14 deletions(-)
--
1.8.5.5
10 years, 1 month
[libvirt] [PATCH] qemu: Don't fail qemuProcessAttach for IOThreads if no JSON
by John Ferlan
While doing some investigation for another bug I found that I could
not qemu-attach to the process and got the following:
error: Operation not supported: JSON monitor is required
while running through qemuProcessAttach. Since we can only get the data
using the JSON parser, and the guest being attached to may not have it,
we shouldn't just fail. See the example in virsh qemu-attach for a
sample command that failed.
Signed-off-by: John Ferlan <jferlan(a)redhat.com>
---
I also considered removing the call from qemuProcessAttach rather than
this approach.
src/qemu/qemu_monitor.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 8927dbb..4342088 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -4112,11 +4112,9 @@ qemuMonitorGetIOThreads(qemuMonitorPtr mon,
return -1;
}
- if (!mon->json) {
- virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
- _("JSON monitor is required"));
- return -1;
- }
+ /* Requires JSON to make the query */
+ if (!mon->json)
+ return 0;
return qemuMonitorJSONGetIOThreads(mon, iothreads);
}
--
1.9.3
10 years, 1 month
[libvirt] [PATCH 1/3] bhyve: tests: fix build
by Roman Bogorodskiy
Commit b20d39a introduced a new argument for the
virNetDevTapCreateInBridgePort function; however, its mock
in the bhyve tests wasn't updated, so the build failed.
Fix the build by adding this new argument to the mock version.
---
tests/bhyvexml2argvmock.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/bhyvexml2argvmock.c b/tests/bhyvexml2argvmock.c
index fa2f14b..0cbea29 100644
--- a/tests/bhyvexml2argvmock.c
+++ b/tests/bhyvexml2argvmock.c
@@ -22,6 +22,7 @@ int virNetDevTapCreateInBridgePort(const char *brname ATTRIBUTE_UNUSED,
char **ifname,
const virMacAddr *macaddr ATTRIBUTE_UNUSED,
const unsigned char *vmuuid ATTRIBUTE_UNUSED,
+ const char *tunpath ATTRIBUTE_UNUSED,
int *tapfd ATTRIBUTE_UNUSED,
int tapfdSize ATTRIBUTE_UNUSED,
virNetDevVPortProfilePtr virtPortProfile ATTRIBUTE_UNUSED,
--
2.1.0
10 years, 1 month
[libvirt] [PATCH] qemu: fix crash with shared disks
by Ján Tomko
Commit f36a94f introduced a double free on all success paths
in qemuSharedDeviceEntryInsert.
Only call qemuSharedDeviceEntryFree on the error path and
set entry to NULL before jumping there if the entry already
is in the hash table.
https://bugzilla.redhat.com/show_bug.cgi?id=1142722
---
src/qemu/qemu_conf.c | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index ac10b64..adc6caf 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -1011,38 +1011,36 @@ qemuSharedDeviceEntryInsert(virQEMUDriverPtr driver,
const char *name)
{
qemuSharedDeviceEntry *entry = NULL;
- int ret = -1;
if ((entry = virHashLookup(driver->sharedDevices, key))) {
/* Nothing to do if the shared scsi host device is already
* recorded in the table.
*/
- if (qemuSharedDeviceEntryDomainExists(entry, name, NULL)) {
- ret = 0;
- goto cleanup;
+ if (!qemuSharedDeviceEntryDomainExists(entry, name, NULL)) {
+ if (VIR_EXPAND_N(entry->domains, entry->ref, 1) < 0 ||
+ VIR_STRDUP(entry->domains[entry->ref - 1], name) < 0) {
+ /* entry is owned by the hash table here */
+ entry = NULL;
+ goto error;
+ }
}
-
- if (VIR_EXPAND_N(entry->domains, entry->ref, 1) < 0 ||
- VIR_STRDUP(entry->domains[entry->ref - 1], name) < 0)
- goto cleanup;
} else {
if (VIR_ALLOC(entry) < 0 ||
VIR_ALLOC_N(entry->domains, 1) < 0 ||
VIR_STRDUP(entry->domains[0], name) < 0)
- goto cleanup;
+ goto error;
entry->ref = 1;
if (virHashAddEntry(driver->sharedDevices, key, entry))
- goto cleanup;
+ goto error;
}
- ret = 0;
+ return 0;
- cleanup:
+ error:
qemuSharedDeviceEntryFree(entry, NULL);
-
- return ret;
+ return -1;
}
--
1.8.5.5
10 years, 1 month
[libvirt] [PATCH 0/6] Improve backing store error reporting
by Peter Krempa
Peter Krempa (6):
util: Add function to check if a virStorageSource is "empty"
qemu: Drop unused formatting of uuid
util: storage: Allow metadata crawler to report useful errors
qemu: Report better errors from broken backing chains
storage: Improve error message when traversing backing chains
qemu: Improve check for local storage
src/libvirt_private.syms | 1 +
src/qemu/qemu_domain.c | 38 ++++++--------------------------------
src/qemu/qemu_process.c | 5 ++---
src/security/virt-aa-helper.c | 2 +-
src/storage/storage_driver.c | 40 +++++++++++++++++++++++++++++-----------
src/storage/storage_driver.h | 3 ++-
src/util/virstoragefile.c | 20 ++++++++++++++++++++
src/util/virstoragefile.h | 1 +
tests/virstoragetest.c | 2 +-
9 files changed, 63 insertions(+), 49 deletions(-)
--
2.1.0
10 years, 1 month
[libvirt] [PATCH] CVE-2014-3633: qemu: blkiotune: Use correct definition when looking up disk
by Peter Krempa
The live definition was used to look up the disk index while the persistent
one was indexed, leading to a crash in qemuDomainGetBlockIoTune. Use the
correct definition and report a nice error.
Unfortunately it's accessible via read-only connection.
Introduced in: eca96694a7f992be633d48d5ca03cedc9bbc3c9aa (v0.9.8)
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1140724
Reported-by: Luyao Huang <lhuang(a)redhat.com>
Signed-off-by: Peter Krempa <pkrempa(a)redhat.com>
---
src/qemu/qemu_driver.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index a5a49ac..209c40e 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -16317,9 +16317,13 @@ qemuDomainGetBlockIoTune(virDomainPtr dom,
}
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
- int idx = virDomainDiskIndexByName(vm->def, disk, true);
- if (idx < 0)
+ int idx = virDomainDiskIndexByName(persistentDef, disk, true);
+ if (idx < 0) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("disk '%s' was not found in the domain config"),
+ disk);
goto endjob;
+ }
reply = persistentDef->disks[idx]->blkdeviotune;
}
--
2.1.0
10 years, 1 month
[libvirt] [PATCHv6 00/11] bulk stats: QEMU implementation
by Peter Krempa
After my review of Francesco's series I tested the patches a bit more and found a few problems.
I'm re-sending the series and also I've added a few patches that tweak the documentation for this.
Francesco Romani (8):
qemu: bulk stats: extend internal collection API
qemu: bulk stats: implement CPU stats group
qemu: bulk stats: implement balloon group
qemu: bulk stats: implement VCPU group
qemu: bulk stats: implement interface group
qemu: bulk stats: implement block group
virsh: add options to query bulk stats group
qemu: bulk stats: add block allocation information
Peter Krempa (3):
lib: De-duplicate stats group documentation for all stats functions
lib: Document that virConnectGetAllDomainStats may omit some stats
fields
man: virsh: Add docs for supported stats groups
include/libvirt/libvirt.h.in | 5 +
src/libvirt.c | 76 +++++-
src/qemu/qemu_driver.c | 543 +++++++++++++++++++++++++++++++++++++------
src/qemu/qemu_monitor.c | 26 +++
src/qemu/qemu_monitor.h | 21 ++
src/qemu/qemu_monitor_json.c | 227 +++++++++++++-----
src/qemu/qemu_monitor_json.h | 4 +
tools/virsh-domain-monitor.c | 35 +++
tools/virsh.pod | 51 +++-
9 files changed, 844 insertions(+), 144 deletions(-)
--
2.1.0
10 years, 1 month