[libvirt PATCH] meson: Fix build with -Dtest_coverage=true
by Jiri Denemark
As can be seen in commit 8a62a1592ae00eab4eb153c02661e56b9d8d9032 (from
autoconf era), the coverage flags have to be used also when linking
objects. However, this was not reflected when we switched to meson.
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
---
src/meson.build | 1 +
tests/meson.build | 8 ++++++++
tools/nss/meson.build | 2 ++
tools/wireshark/src/meson.build | 3 +++
4 files changed, 14 insertions(+)
diff --git a/src/meson.build b/src/meson.build
index 7c478219d6..980578d5d6 100644
--- a/src/meson.build
+++ b/src/meson.build
@@ -21,6 +21,7 @@ src_dep = declare_dependency(
+ coverage_flags
+ driver_modules_flags
+ win32_link_flags
+ + coverage_flags
),
)
diff --git a/tests/meson.build b/tests/meson.build
index f1d91ca50d..c65487f5c2 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -202,6 +202,9 @@ foreach mock : mock_libs
libvirt_lib,
mock.get('link_with', []),
],
+ link_args: [
+ coverage_flags,
+ ],
)
endforeach
@@ -218,6 +221,7 @@ executable(
],
link_args: [
libvirt_no_indirect,
+ coverage_flags
],
)
@@ -566,6 +570,7 @@ foreach data : tests
],
link_args: [
libvirt_no_indirect,
+ coverage_flags,
],
link_with: [
libvirt_lib,
@@ -644,6 +649,9 @@ foreach data : helpers
link_with: [
data['link_with'],
],
+ link_args: [
+ coverage_flags,
+ ],
export_dynamic: true,
)
endforeach
diff --git a/tools/nss/meson.build b/tools/nss/meson.build
index cf3eec9b24..198936f3d4 100644
--- a/tools/nss/meson.build
+++ b/tools/nss/meson.build
@@ -66,6 +66,7 @@ nss_libvirt_lib = shared_module(
link_args: [
nss_libvirt_syms,
libvirt_export_dynamic,
+ coverage_flags,
],
link_whole: [
nss_libvirt_impl,
@@ -81,6 +82,7 @@ nss_libvirt_guest_lib = shared_library(
link_args: [
nss_libvirt_guest_syms,
libvirt_export_dynamic,
+ coverage_flags,
],
link_whole: [
nss_libvirt_guest_impl,
diff --git a/tools/wireshark/src/meson.build b/tools/wireshark/src/meson.build
index 49ccc9bb86..9b452dc5ca 100644
--- a/tools/wireshark/src/meson.build
+++ b/tools/wireshark/src/meson.build
@@ -12,6 +12,9 @@ shared_library(
xdr_dep,
tools_dep,
],
+ link_args: [
+ coverage_flags
+ ],
install: true,
install_dir: wireshark_plugindir,
)
--
2.30.0
3 years, 11 months
[PATCH] storage: Linstor support
by Rene Peinthor
Implement a LINSTOR backend storage driver.
The Linstor client needs to be installed and it needs to be configured
on the nodes used by the controller.
It supports most pool/vol commands, except for pool-build/pool-delete
and provides a block device in RAW file mode.
Linstor supports more than just DRBD so it would also be possible to have
it provide LVM, ZFS or NVME volumes, but the common case will be to provide
DRBD volumes in a cluster.
Sample pool XML:
<pool type='linstor'>
<name>linstor</name>
<source>
<host name='ubuntu-focal-60'/>
<name>libvirtgrp</name>
</source>
</pool>
<pool/source/name> element must point to an already created LINSTOR
resource-group, which is used to spawn resources/volumes.
<pool/source/host@name> attribute should be the local linstor node name,
if missing it will try to get the host's uname and use that instead.
Result volume XML sample:
<volume type='block'>
<name>alpine12</name>
<key>libvirtgrp/alpine12</key>
<capacity unit='bytes'>5368709120</capacity>
<allocation unit='bytes'>5540028416</allocation>
<target>
<path>/dev/drbd1000</path>
<format type='raw'/>
</target>
</volume>
Signed-off-by: Rene Peinthor <rene.peinthor(a)linbit.com>
---
docs/schemas/storagepool.rng | 27 +
docs/storage.html.in | 39 +
include/libvirt/libvirt-storage.h | 1 +
meson.build | 6 +
meson_options.txt | 1 +
po/POTFILES.in | 1 +
src/conf/domain_conf.c | 1 +
src/conf/storage_conf.c | 14 +-
src/conf/storage_conf.h | 1 +
src/conf/virstorageobj.c | 4 +-
src/storage/meson.build | 25 +
src/storage/storage_backend.c | 6 +
src/storage/storage_backend_linstor.c | 803 ++++++++++++++++++
src/storage/storage_backend_linstor.h | 23 +
src/storage/storage_backend_linstor_priv.h | 53 ++
src/storage/storage_driver.c | 1 +
src/test/test_driver.c | 1 +
tests/linstorjsondata/broken.json | 1 +
tests/linstorjsondata/resource-group.json | 1 +
.../linstorjsondata/resource-list-test2.json | 332 ++++++++
.../storage-pools-ssdpool.json | 72 ++
tests/linstorjsondata/storage-pools.json | 192 +++++
tests/linstorjsondata/volume-def-list.json | 158 ++++
.../volume-definition-test2.json | 1 +
tests/meson.build | 6 +
tests/storagebackendlinstortest.c | 371 ++++++++
.../storagepoolcapsschemadata/poolcaps-fs.xml | 7 +
.../poolcaps-full.xml | 7 +
tests/storagepoolxml2argvtest.c | 1 +
tests/storagepoolxml2xmlin/pool-linstor.xml | 8 +
tests/storagevolxml2xmlin/vol-linstor.xml | 10 +
tools/virsh-pool.c | 3 +
32 files changed, 2175 insertions(+), 2 deletions(-)
create mode 100644 src/storage/storage_backend_linstor.c
create mode 100644 src/storage/storage_backend_linstor.h
create mode 100644 src/storage/storage_backend_linstor_priv.h
create mode 100644 tests/linstorjsondata/broken.json
create mode 100644 tests/linstorjsondata/resource-group.json
create mode 100644 tests/linstorjsondata/resource-list-test2.json
create mode 100644 tests/linstorjsondata/storage-pools-ssdpool.json
create mode 100644 tests/linstorjsondata/storage-pools.json
create mode 100644 tests/linstorjsondata/volume-def-list.json
create mode 100644 tests/linstorjsondata/volume-definition-test2.json
create mode 100644 tests/storagebackendlinstortest.c
create mode 100644 tests/storagepoolxml2xmlin/pool-linstor.xml
create mode 100644 tests/storagevolxml2xmlin/vol-linstor.xml
diff --git a/docs/schemas/storagepool.rng b/docs/schemas/storagepool.rng
index bd24b8b8d0..9b163e611d 100644
--- a/docs/schemas/storagepool.rng
+++ b/docs/schemas/storagepool.rng
@@ -26,6 +26,7 @@
<ref name="poolgluster"/>
<ref name="poolzfs"/>
<ref name="poolvstorage"/>
+ <ref name="poollinstor"/>
</choice>
</element>
</define>
@@ -224,6 +225,21 @@
</interleave>
</define>
+ <define name="poollinstor">
+ <attribute name="type">
+ <value>linstor</value>
+ </attribute>
+ <interleave>
+ <ref name="commonMetadataNameOptional"/>
+ <ref name="sizing"/>
+ <ref name="features"/>
+ <ref name="sourcelinstor"/>
+ <optional>
+ <ref name="target"/>
+ </optional>
+ </interleave>
+ </define>
+
<define name="sourceinfovendor">
<interleave>
<optional>
@@ -463,6 +479,17 @@
</element>
</define>
+ <define name="sourcelinstor">
+ <element name="source">
+ <interleave>
+ <ref name="sourceinfoname"/>
+ <optional>
+ <ref name="sourceinfohost"/>
+ </optional>
+ </interleave>
+ </element>
+ </define>
+
<define name="sourcefmtfs">
<optional>
<element name="format">
diff --git a/docs/storage.html.in b/docs/storage.html.in
index b2cf343933..9130fbd180 100644
--- a/docs/storage.html.in
+++ b/docs/storage.html.in
@@ -829,5 +829,44 @@
<h3>Valid volume format types</h3>
<p>The valid volume types are the same as for the directory pool.</p>
+
+
+ <h2><a id="StorageBackendLINSTOR">LINSTOR pool</a></h2>
+ <p>
+ This provides a pool using the LINSTOR software-defined-storage.
+ LINSTOR can provide block storage devices based on DRBD or basic
+ LVM/ZFS volumes.
+ </p>
+
+ <p>
+ To use LINSTOR in libvirt, set up a working LINSTOR cluster; documentation
+ for that is in the LINSTOR User's Guide.
+ Then create a resource-group that will be used by libvirt, and make sure
+ the resource-group is set up so that all nodes you want to use with libvirt
+ will create a resource: either use diskless-on-remaining or make sure
+ the replica-count matches the number of nodes in your cluster.
+ </p>
+
+ <p><span class="since">Since 7.1.0</span></p>
+
+ <h3>Example pool input</h3>
+ <pre>
+ <pool type="linstor">
+ <name>linstorpool</name>
+ <source>
+ <name>libvirtrscgrp</name>
+ &lt;host name="linstornode"/&gt;
+ </source>
+ </pool></pre>
+
+ <h3>Valid pool format types</h3>
+ <p>
+ The LINSTOR volume pool does not use the pool format type element.
+ </p>
+
+ <h3>Valid volume format types</h3>
+ <p>
+ The LINSTOR volume pool does not use the volume format type element.
+ </p>
</body>
</html>
diff --git a/include/libvirt/libvirt-storage.h b/include/libvirt/libvirt-storage.h
index 089e1e0bd1..6876ce6c5a 100644
--- a/include/libvirt/libvirt-storage.h
+++ b/include/libvirt/libvirt-storage.h
@@ -245,6 +245,7 @@ typedef enum {
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS = 1 << 17,
VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE = 1 << 18,
VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI_DIRECT = 1 << 19,
+ VIR_CONNECT_LIST_STORAGE_POOLS_LINSTOR = 1 << 20,
} virConnectListAllStoragePoolsFlags;
int virConnectListAllStoragePools(virConnectPtr conn,
diff --git a/meson.build b/meson.build
index b5164f68ed..7c3d8be9fc 100644
--- a/meson.build
+++ b/meson.build
@@ -1899,6 +1899,11 @@ if conf.has('WITH_LIBVIRTD')
error('Need libiscsi for iscsi-direct storage driver')
endif
+ if not get_option('storage_linstor').disabled()
+ use_storage = true
+ conf.set('WITH_STORAGE_LINSTOR', 1)
+ endif
+
if not get_option('storage_lvm').disabled()
lvm_enable = true
lvm_progs = [
@@ -2315,6 +2320,7 @@ storagedriver_summary = {
'Dir': conf.has('WITH_STORAGE_DIR'),
'FS': conf.has('WITH_STORAGE_FS'),
'NetFS': conf.has('WITH_STORAGE_FS'),
+ 'Linstor': conf.has('WITH_STORAGE_LINSTOR'),
'LVM': conf.has('WITH_STORAGE_LVM'),
'iSCSI': conf.has('WITH_STORAGE_ISCSI'),
'iscsi-direct': conf.has('WITH_STORAGE_ISCSI_DIRECT'),
diff --git a/meson_options.txt b/meson_options.txt
index e5d79c2b6b..247d88e0ee 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -79,6 +79,7 @@ option('storage_fs', type: 'feature', value: 'auto', description: 'FileSystem ba
option('storage_gluster', type: 'feature', value: 'auto', description: 'Gluster backend for the storage driver')
option('storage_iscsi', type: 'feature', value: 'auto', description: 'iscsi backend for the storage driver')
option('storage_iscsi_direct', type: 'feature', value: 'auto', description: 'iscsi-direct backend for the storage driver')
+option('storage_linstor', type: 'feature', value: 'auto', description: 'Linstor backend for the storage driver')
option('storage_lvm', type: 'feature', value: 'auto', description: 'LVM backend for the storage driver')
option('storage_mpath', type: 'feature', value: 'auto', description: 'mpath backend for the storage driver')
option('storage_rbd', type: 'feature', value: 'auto', description: 'RADOS Block Device backend for the storage driver')
diff --git a/po/POTFILES.in b/po/POTFILES.in
index 14636d4b93..5d8ecfc61c 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -214,6 +214,7 @@
@SRCDIR(a)src/storage/storage_backend_gluster.c
@SRCDIR(a)src/storage/storage_backend_iscsi.c
@SRCDIR(a)src/storage/storage_backend_iscsi_direct.c
+@SRCDIR(a)src/storage/storage_backend_linstor.c
@SRCDIR(a)src/storage/storage_backend_logical.c
@SRCDIR(a)src/storage/storage_backend_mpath.c
@SRCDIR(a)src/storage/storage_backend_rbd.c
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 01b7187637..cdf1da81c8 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -31333,6 +31333,7 @@ virDomainStorageSourceTranslateSourcePool(virStorageSourcePtr src,
case VIR_STORAGE_POOL_SCSI:
case VIR_STORAGE_POOL_ZFS:
case VIR_STORAGE_POOL_VSTORAGE:
+ case VIR_STORAGE_POOL_LINSTOR:
if (!(src->path = virStorageVolGetPath(vol)))
return -1;
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
index 0c50529ace..9a0dda6374 100644
--- a/src/conf/storage_conf.c
+++ b/src/conf/storage_conf.c
@@ -60,7 +60,7 @@ VIR_ENUM_IMPL(virStoragePool,
"logical", "disk", "iscsi",
"iscsi-direct", "scsi", "mpath",
"rbd", "sheepdog", "gluster",
- "zfs", "vstorage",
+ "zfs", "vstorage", "linstor"
);
VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
@@ -304,6 +304,18 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
.formatToString = virStorageFileFormatTypeToString,
},
},
+ {.poolType = VIR_STORAGE_POOL_LINSTOR,
+ .poolOptions = {
+ .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
+ VIR_STORAGE_POOL_SOURCE_NETWORK |
+ VIR_STORAGE_POOL_SOURCE_NAME),
+ },
+ .volOptions = {
+ .defaultFormat = VIR_STORAGE_FILE_RAW,
+ .formatFromString = virStorageVolumeFormatFromString,
+ .formatToString = virStorageFileFormatTypeToString,
+ }
+ },
};
diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
index ffd406e093..716bde942f 100644
--- a/src/conf/storage_conf.h
+++ b/src/conf/storage_conf.h
@@ -110,6 +110,7 @@ typedef enum {
VIR_STORAGE_POOL_GLUSTER, /* Gluster device */
VIR_STORAGE_POOL_ZFS, /* ZFS */
VIR_STORAGE_POOL_VSTORAGE, /* Virtuozzo Storage */
+ VIR_STORAGE_POOL_LINSTOR, /* Linstor Storage */
VIR_STORAGE_POOL_LAST,
} virStoragePoolType;
diff --git a/src/conf/virstorageobj.c b/src/conf/virstorageobj.c
index 9fe8b3f28e..4a2a924eb2 100644
--- a/src/conf/virstorageobj.c
+++ b/src/conf/virstorageobj.c
@@ -1461,13 +1461,15 @@ virStoragePoolObjSourceFindDuplicateCb(const void *payload,
case VIR_STORAGE_POOL_FS:
case VIR_STORAGE_POOL_LOGICAL:
case VIR_STORAGE_POOL_DISK:
+ case VIR_STORAGE_POOL_LINSTOR:
case VIR_STORAGE_POOL_ZFS:
if ((data->def->type == VIR_STORAGE_POOL_ISCSI ||
data->def->type == VIR_STORAGE_POOL_ISCSI_DIRECT ||
data->def->type == VIR_STORAGE_POOL_FS ||
data->def->type == VIR_STORAGE_POOL_LOGICAL ||
data->def->type == VIR_STORAGE_POOL_DISK ||
- data->def->type == VIR_STORAGE_POOL_ZFS) &&
+ data->def->type == VIR_STORAGE_POOL_ZFS ||
+ data->def->type == VIR_STORAGE_POOL_LINSTOR) &&
virStoragePoolObjSourceMatchTypeDEVICE(obj, data->def))
return 1;
break;
diff --git a/src/storage/meson.build b/src/storage/meson.build
index b4cefe9a89..d58519ba24 100644
--- a/src/storage/meson.build
+++ b/src/storage/meson.build
@@ -43,6 +43,10 @@ storage_backend_iscsi_direct_sources = [
'storage_backend_iscsi_direct.c',
]
+storage_backend_linstor_sources = [
+ 'storage_backend_linstor.c',
+]
+
storage_lvm_backend_sources = [
'storage_backend_logical.c',
]
@@ -217,6 +221,27 @@ if conf.has('WITH_STORAGE_ISCSI_DIRECT')
}
endif
+if conf.has('WITH_STORAGE_LINSTOR')
+ storage_backend_linstor_priv_lib = static_library(
+ 'virt_storage_backend_linstor_priv',
+ storage_backend_linstor_sources,
+ dependencies: [
+ src_dep,
+ ],
+ include_directories: [
+ conf_inc_dir,
+ ],
+ )
+
+ virt_modules += {
+ 'name': 'virt_storage_backend_linstor',
+ 'link_whole': [
+ storage_backend_linstor_priv_lib,
+ ],
+ 'install_dir': storage_backend_install_dir,
+ }
+endif
+
if conf.has('WITH_STORAGE_LVM')
virt_modules += {
'name': 'virt_storage_backend_logical',
diff --git a/src/storage/storage_backend.c b/src/storage/storage_backend.c
index 2bce445575..27c2ed5e5b 100644
--- a/src/storage/storage_backend.c
+++ b/src/storage/storage_backend.c
@@ -70,6 +70,9 @@
#if WITH_STORAGE_VSTORAGE
# include "storage_backend_vstorage.h"
#endif
+#if WITH_STORAGE_LINSTOR
+# include "storage_backend_linstor.h"
+#endif
#define VIR_FROM_THIS VIR_FROM_STORAGE
@@ -144,6 +147,9 @@ virStorageBackendDriversRegister(bool allbackends G_GNUC_UNUSED)
#if WITH_STORAGE_VSTORAGE
VIR_STORAGE_BACKEND_REGISTER(virStorageBackendVstorageRegister, "vstorage");
#endif
+#if WITH_STORAGE_LINSTOR
+ VIR_STORAGE_BACKEND_REGISTER(virStorageBackendLinstorRegister, "linstor");
+#endif
return 0;
}
diff --git a/src/storage/storage_backend_linstor.c b/src/storage/storage_backend_linstor.c
new file mode 100644
index 0000000000..65463eb26b
--- /dev/null
+++ b/src/storage/storage_backend_linstor.c
@@ -0,0 +1,803 @@
+/*
+ * storage_backend_linstor.c: storage backend for linstor volume handling
+ *
+ * Copyright (C) 2020-2021 Rene Peinthor
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#include "storage_backend_linstor.h"
+#define LIBVIRT_STORAGE_BACKEND_LINSTOR_PRIV_H_ALLOW
+#include "storage_backend_linstor_priv.h"
+#include "virerror.h"
+#include "virjson.h"
+#include "virstring.h"
+#include "virlog.h"
+#include "viralloc.h"
+#include "storage_conf.h"
+#include "storage_util.h"
+
+#include <sys/utsname.h>
+
+#define VIR_FROM_THIS VIR_FROM_STORAGE
+
+VIR_LOG_INIT("storage.storage_backend_linstor");
+
+
+#define LINSTORCLI "linstor"
+
+
+/**
+ * @brief virStorageBackendLinstorGetNodeName
+ * Get the configured linstor node name, checks pool host[0]
+ * if node isn't set there, it will try to get hostname and use that.
+ * @param pool Pool configuration
+ * @param nodenameOut Retrieved nodename will be copied here, caller is responsible to free.
+ * @return -1 on error, otherwise 0
+ */
+static int
+virStorageBackendLinstorGetNodeName(virStoragePoolObjPtr pool, char **nodenameOut)
+{
+ int ret = 0;
+ struct utsname host;
+ virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
+ if (def->source.nhost > 0 && def->source.hosts[0].name != NULL)
+ *nodenameOut = g_strdup(def->source.hosts[0].name);
+ else if (uname(&host) == 0)
+ *nodenameOut = g_strdup(host.nodename);
+ else
+ ret = -1;
+
+ return ret;
+}
+
+
+static virCommandPtr
+virStorageBackendLinstorPrepLinstorCmd(bool machineout)
+{
+ if (machineout)
+ return virCommandNewArgList(LINSTORCLI, "-m", "--output-version", "v1", NULL);
+ else
+ return virCommandNewArgList(LINSTORCLI, NULL);
+}
+
+
+/**
+ * @brief virStorageBackendLinstorUnpackLinstorJSON
+ * Linstor client results are packed into an array, as results usually contain
+ * a list of apicallrcs. But lists usually only have 1 entry.
+ * @param replyArr linstor reply array json
+ * @return Pointer to the first array element or NULL if no array or empty
+ */
+static virJSONValuePtr
+virStorageBackendLinstorUnpackLinstorJSON(virJSONValuePtr replyArr)
+{
+ if (replyArr == NULL) {
+ return NULL;
+ }
+
+ if (!virJSONValueIsArray(replyArr)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Root Linstor list result is expected to be an array"));
+ return NULL;
+ }
+
+ if (virJSONValueArraySize(replyArr) == 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Empty reply from Linstor client"));
+ return NULL;
+ }
+
+ return virJSONValueArrayGet(replyArr, 0);
+}
+
+
+int
+virStorageBackendLinstorFilterRscDefsForRscGroup(const char *resourceGroup,
+ const char *output,
+ virJSONValuePtr rscDefArrayOut)
+{
+ int ret = -1;
+ virJSONValuePtr replyArr = NULL;
+ virJSONValuePtr rscDefArr = NULL;
+ size_t i;
+
+ replyArr = virJSONValueFromString(output);
+
+ rscDefArr = virStorageBackendLinstorUnpackLinstorJSON(replyArr);
+ if (rscDefArr == NULL) {
+ goto cleanup;
+ }
+
+ for (i = 0; i < virJSONValueArraySize(rscDefArr); i++) {
+ virJSONValuePtr rscDefObj = virJSONValueArrayGet(rscDefArr, i);
+
+ if (g_ascii_strcasecmp(virJSONValueObjectGetString(rscDefObj, "resource_group_name"),
+ resourceGroup) == 0) {
+
+ virJSONValueArrayAppendString(rscDefArrayOut,
+ g_strdup(virJSONValueObjectGetString(rscDefObj, "name")));
+ }
+ }
+
+ ret = 0;
+ cleanup:
+ virJSONValueFree(replyArr);
+ return ret;
+}
+
+
+int
+virStorageBackendLinstorParseResourceGroupList(const char *resourceGroup,
+ const char *output,
+ virJSONValuePtr *storPoolArrayOut)
+{
+ int ret = -1;
+ bool rscGrpFound = false;
+ virJSONValuePtr replyArr = NULL;
+ virJSONValuePtr rscGrpArr = NULL;
+ virJSONValuePtr rscGrpSelFilterObj = NULL;
+ virJSONValuePtr storPoolsArr = NULL;
+ size_t i;
+
+ replyArr = virJSONValueFromString(output);
+
+ rscGrpArr = virStorageBackendLinstorUnpackLinstorJSON(replyArr);
+ if (rscGrpArr == NULL) {
+ goto cleanup;
+ }
+
+ for (i = 0; i < virJSONValueArraySize(rscGrpArr); i++) {
+ virJSONValuePtr rscGrpObj = virJSONValueArrayGet(rscGrpArr, i);
+
+ if (g_ascii_strcasecmp(virJSONValueObjectGetString(rscGrpObj, "name"),
+ resourceGroup) == 0) {
+ rscGrpFound = true;
+
+ rscGrpSelFilterObj = virJSONValueObjectGetObject(rscGrpObj, "select_filter");
+ if (rscGrpSelFilterObj != NULL) {
+ storPoolsArr = virJSONValueObjectGetArray(rscGrpSelFilterObj, "storage_pool_list");
+
+ *storPoolArrayOut = virJSONValueCopy(storPoolsArr);
+ }
+ break;
+ }
+ }
+
+ if (!rscGrpFound) {
+ virReportError(VIR_ERR_INVALID_STORAGE_POOL,
+ _("Specified resource group '%s' not found in linstor"), resourceGroup);
+ goto cleanup;
+ }
+
+ ret = 0;
+ cleanup:
+ virJSONValueFree(replyArr);
+ return ret;
+}
+
+
+/**
+ * @brief virStorageBackendLinstorParseStoragePoolList
+ * Parses a storage pool list result and updates the pools capacity, allocation numbers,
+ * for the given node.
+ * @param pool Pool object to update
+ * @param nodename Node name of which storage pools are taken for the update.
+ * @param output JSON output content from the `linstor storage-pool list` command
+ * @return -1 on error, 0 on success
+ */
+int
+virStorageBackendLinstorParseStoragePoolList(virStoragePoolDefPtr pool,
+ const char* nodename,
+ const char *output)
+{
+ int ret = -1;
+ virJSONValuePtr replyArr = NULL;
+ virJSONValuePtr storpoolArr = NULL;
+ unsigned long long capacity = 0;
+ unsigned long long freeCapacity = 0;
+ size_t i;
+
+ replyArr = virJSONValueFromString(output);
+
+ storpoolArr = virStorageBackendLinstorUnpackLinstorJSON(replyArr);
+ if (storpoolArr == NULL) {
+ goto cleanup;
+ }
+
+ if (!virJSONValueIsArray(storpoolArr)) {
+ // probably an ApiCallRc then, with an error
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Storage pool list not received"));
+ goto cleanup;
+ }
+
+ for (i = 0; i < virJSONValueArraySize(storpoolArr); i++) {
+ unsigned long long storCapacity = 0;
+ unsigned long long storFree = 0;
+ virJSONValuePtr storPoolObj = virJSONValueArrayGet(storpoolArr, i);
+
+ if (!virJSONValueIsObject(storPoolObj)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unable to parse storage pool object for pool '%s'"),
+ pool->name);
+ goto cleanup;
+ }
+
+ if (g_ascii_strcasecmp(virJSONValueObjectGetString(storPoolObj, "node_name"), nodename) == 0) {
+ if (g_str_equal(virJSONValueObjectGetString(storPoolObj, "provider_kind"), "DISKLESS")) {
+ /* ignore diskless pools, as they have no capacity */
+ continue;
+ }
+
+ if (virJSONValueObjectGetNumberUlong(storPoolObj, "total_capacity", &storCapacity)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unable to parse storage pool '%s' capacity"),
+ virJSONValueObjectGetString(storPoolObj, "storage_pool_name"));
+ goto cleanup;
+ }
+ if (virJSONValueObjectGetNumberUlong(storPoolObj, "free_capacity", &storFree)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unable to parse storage pool '%s' free capacity"),
+ virJSONValueObjectGetString(storPoolObj, "storage_pool_name"));
+ goto cleanup;
+ }
+ capacity += storCapacity * 1024; // linstor reports in KiB
+ freeCapacity += storFree * 1024; // linstor reports in KiB
+ }
+ }
+
+ pool->capacity = capacity;
+ pool->available = freeCapacity;
+ pool->allocation = capacity - freeCapacity;
+
+ ret = 0;
+
+ cleanup:
+ virJSONValueFree(replyArr);
+ return ret;
+}
+
+
+/**
+ * @brief virStorageBackendLinstorParseVolumeDefinition
+ * Parses the machine output of `linstor volume-definition list` and updates
+ * the virStorageVolDef capacity.
+ * @param vol Volume to update the capacity
+ * @param output JSON output of `linstor volume-definition list -r ...`
+ * @return -1 on error, 0 on success
+ */
+int
+virStorageBackendLinstorParseVolumeDefinition(virStorageVolDefPtr vol,
+ const char *output)
+{
+ int ret = -1;
+ virJSONValuePtr replyArr = NULL;
+ virJSONValuePtr resourceDefArr = NULL;
+ size_t i;
+
+ replyArr = virJSONValueFromString(output);
+
+ resourceDefArr = virStorageBackendLinstorUnpackLinstorJSON(replyArr);
+ if (resourceDefArr == NULL) {
+ goto cleanup;
+ }
+
+ if (!virJSONValueIsArray(resourceDefArr)) {
+ /* probably an ApiCallRc then, with an error */
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Volume definition list not received"));
+ goto cleanup;
+ }
+
+ for (i = 0; i < virJSONValueArraySize(resourceDefArr); i++) {
+ unsigned long long volDefCapacityKiB = 0;
+ virJSONValuePtr resourceDefObj = virJSONValueArrayGet(resourceDefArr, i);
+
+ if (resourceDefObj == NULL || !virJSONValueIsObject(resourceDefObj)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Unable to parse resource definition object"));
+ goto cleanup;
+ }
+
+ if (g_ascii_strcasecmp(virJSONValueObjectGetString(resourceDefObj, "name"), vol->name) == 0) {
+ virJSONValuePtr volumeDefArr = virJSONValueObjectGet(resourceDefObj, "volume_definitions");
+ virJSONValuePtr volumeDefObj = NULL;
+
+ if (volumeDefArr == NULL || !virJSONValueIsArray(volumeDefArr)
+ || virJSONValueArraySize(volumeDefArr) == 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Volume definition list incorrect for resource definition '%s'"),
+ vol->name);
+ goto cleanup;
+ }
+
+ volumeDefObj = virJSONValueArrayGet(volumeDefArr, 0);
+ if (virJSONValueObjectGetNumberUlong(volumeDefObj, "size_kib", &volDefCapacityKiB)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unable to parse volume definition size for resource '%s'"),
+ vol->name);
+ goto cleanup;
+ }
+
+ /* linstor reports in KiB */
+ vol->target.capacity = volDefCapacityKiB * 1024;
+ break;
+ }
+ }
+
+ ret = 0;
+
+ cleanup:
+ virJSONValueFree(replyArr);
+ return ret;
+}
+
+
+static int
+virStorageBackendLinstorRefreshVolFromJSON(const char *sourceName,
+ virStorageVolDefPtr vol,
+ virJSONValuePtr linstorResObj,
+ const char *volumeDefListOutput)
+{
+ virJSONValuePtr volumesArr = NULL;
+ virJSONValuePtr volumeObj = NULL;
+ long long alloc_kib = 0;
+
+ volumesArr = virJSONValueObjectGet(linstorResObj, "volumes");
+
+ if (volumesArr != NULL && !virJSONValueIsArray(volumesArr)) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("'volumes' not found in resource object JSON"));
+ return -1;
+ }
+
+ volumeObj = virJSONValueArrayGet(volumesArr, 0);
+
+ vol->type = VIR_STORAGE_VOL_BLOCK;
+ VIR_FREE(vol->key);
+ vol->key = g_strdup_printf("%s/%s", sourceName, vol->name);
+ VIR_FREE(vol->target.path);
+ vol->target.path = g_strdup(virJSONValueObjectGetString(volumeObj, "device_path"));
+ vol->target.format = VIR_STORAGE_FILE_RAW;
+
+ virJSONValueObjectGetNumberLong(volumeObj, "allocated_size_kib", &alloc_kib);
+
+ if (alloc_kib >= 0)
+ vol->target.allocation = alloc_kib * 1024;
+ else
+ vol->target.allocation = 0;
+
+ if (volumeDefListOutput != NULL) {
+ return virStorageBackendLinstorParseVolumeDefinition(vol, volumeDefListOutput);
+ }
+
+ return 0;
+}
+
+
+static int
+virStorageBackendLinstorRefreshVol(virStoragePoolObjPtr pool,
+ virStorageVolDefPtr vol)
+{
+ int ret = -1;
+ g_autofree char *output = NULL;
+ g_autofree char *outputVolDef = NULL;
+ g_autofree char *nodename = NULL;
+ g_autoptr(virCommand) cmdResList = NULL;
+ g_autoptr(virCommand) cmdVolDefList = NULL;
+ virJSONValuePtr replyArr = NULL;
+ virJSONValuePtr rscArr = NULL;
+ virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
+
+ if (virStorageBackendLinstorGetNodeName(pool, &nodename))
+ return -1;
+
+ cmdResList = virStorageBackendLinstorPrepLinstorCmd(true);
+ virCommandAddArgList(cmdResList, "resource", "list", "-n", nodename, "-r", vol->name, NULL);
+ virCommandSetOutputBuffer(cmdResList, &output);
+ if (virCommandRun(cmdResList, NULL) < 0)
+ return -1;
+
+ cmdVolDefList = virStorageBackendLinstorPrepLinstorCmd(true);
+ virCommandAddArgList(cmdVolDefList, "volume-definition", "list", "-r", vol->name, NULL);
+ virCommandSetOutputBuffer(cmdVolDefList, &outputVolDef);
+ if (virCommandRun(cmdVolDefList, NULL) < 0)
+ return -1;
+
+ replyArr = virJSONValueFromString(output);
+
+ rscArr = virStorageBackendLinstorUnpackLinstorJSON(replyArr);
+ if (rscArr == NULL) {
+ goto cleanup;
+ }
+
+ if (!virJSONValueIsArray(rscArr)) {
+ // probably an ApiCallRc then, with an error
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Resource list not received"));
+ goto cleanup;
+ }
+
+ if (virJSONValueArraySize(rscArr) != 1) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Couldn't find resource '%s' in Linstor resource list JSON"), vol->name);
+ goto cleanup;
+ }
+
+ ret = virStorageBackendLinstorRefreshVolFromJSON(
+ def->source.name, vol, virJSONValueArrayGet(rscArr, 0), outputVolDef);
+
+ cleanup:
+ virJSONValueFree(rscArr);
+ return ret;
+}
+
+
+static int
+virStorageBackendLinstorAddVolume(virStoragePoolObjPtr pool,
+ virJSONValuePtr resourceObj,
+ const char *outputVolDef)
+{
+ g_autoptr(virStorageVolDef) vol = NULL;
+ virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
+
+ if (resourceObj == NULL) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Missing disk info when adding volume"));
+ return -1;
+ }
+
+ vol = g_new0(virStorageVolDef, 1);
+
+ vol->name = g_strdup(virJSONValueObjectGetString(resourceObj, "name"));
+
+ if (virStorageBackendLinstorRefreshVolFromJSON(def->source.name,
+ vol, resourceObj, outputVolDef) < 0) {
+ virStorageVolDefFree(vol);
+ return -1;
+ }
+
+ if (virStoragePoolObjAddVol(pool, vol) < 0) {
+ virStorageVolDefFree(vol);
+ return -1;
+ }
+ vol = NULL;
+
+ return 0;
+}
+
+
+static bool
+virStorageBackendLinstorStringInJSONArray(virJSONValuePtr arr, const char *string)
+{
+ size_t i;
+ for (i = 0; i < virJSONValueArraySize(arr); i++) {
+ if (g_ascii_strcasecmp(virJSONValueGetString(virJSONValueArrayGet(arr, i)), string) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+int
+virStorageBackendLinstorParseResourceList(virStoragePoolObjPtr pool,
+ const char *nodeName,
+ virJSONValuePtr rscDefFilterArr,
+ const char *outputRscList,
+ const char *outputVolDef)
+{
+ int ret = -1;
+ virJSONValuePtr replyArr = NULL;
+ virJSONValuePtr rscListArr = NULL;
+ size_t i;
+
+ replyArr = virJSONValueFromString(outputRscList);
+
+ rscListArr = virStorageBackendLinstorUnpackLinstorJSON(replyArr);
+ if (rscListArr == NULL) {
+ goto cleanup;
+ }
+
+ if (!virJSONValueIsArray(rscListArr)) {
+ // probably an ApiCallRc then, with an error
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Storage pool list not received"));
+ goto cleanup;
+ }
+
+ for (i = 0; i < virJSONValueArraySize(rscListArr); i++) {
+ virJSONValuePtr rscObj = virJSONValueArrayGet(rscListArr, i);
+
+ if (g_ascii_strcasecmp(virJSONValueObjectGetString(rscObj, "node_name"), nodeName) == 0 &&
+ virStorageBackendLinstorStringInJSONArray(rscDefFilterArr,
+ virJSONValueObjectGetString(rscObj, "name"))) {
+ if (virStorageBackendLinstorAddVolume(pool, rscObj, outputVolDef)) {
+ goto cleanup;
+ }
+ }
+ }
+
+ ret = 0;
+
+ cleanup:
+ virJSONValueFree(rscListArr);
+ return ret;
+}
+
+/**
+ * @brief virStorageBackendLinstorRefreshAllVol
+ * Refresh all volumes of @pool: lists the resources usable on the pool's
+ * node, filters them by the resource definitions belonging to the pool's
+ * resource group and adds each match as a volume.
+ * @param pool storage pool object to refresh
+ * @return -1 on error, 0 on success
+ */
+static int
+virStorageBackendLinstorRefreshAllVol(virStoragePoolObjPtr pool)
+{
+    int ret = -1;
+    g_autofree char *output = NULL;
+    g_autofree char *outputVolDef = NULL;
+    g_autofree char *nodename = NULL;
+    g_autoptr(virCommand) cmdRscList = NULL;
+    g_autoptr(virCommand) cmdVolDefList = NULL;
+    /* not g_autoptr: freed manually in the cleanup path below */
+    virJSONValuePtr rscDefFilterArr = virJSONValueNewArray();
+    virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
+
+    /* Get all resources usable on that node */
+    if (virStorageBackendLinstorGetNodeName(pool, &nodename)) {
+        goto cleanup;
+    }
+
+    cmdRscList = virStorageBackendLinstorPrepLinstorCmd(true);
+    virCommandAddArgList(cmdRscList, "resource", "list", "-n", nodename, NULL);
+    virCommandSetOutputBuffer(cmdRscList, &output);
+    if (virCommandRun(cmdRscList, NULL) < 0)
+        goto cleanup;
+
+    /* Get a list of resources that belong to the rsc group for filtering */
+    cmdVolDefList = virStorageBackendLinstorPrepLinstorCmd(true);
+    virCommandAddArgList(cmdVolDefList, "volume-definition", "list", NULL);
+    virCommandSetOutputBuffer(cmdVolDefList, &outputVolDef);
+    if (virCommandRun(cmdVolDefList, NULL) < 0) {
+        goto cleanup;
+    }
+
+    /* resource belonging to the resource group will be stored in rscDefFilterArr */
+    if (virStorageBackendLinstorFilterRscDefsForRscGroup(def->source.name,
+                                                         outputVolDef,
+                                                         rscDefFilterArr)) {
+        goto cleanup;
+    }
+
+    /* outputVolDef is passed along so per-volume capacity can be parsed too */
+    ret = virStorageBackendLinstorParseResourceList(pool,
+                                                    nodename,
+                                                    rscDefFilterArr,
+                                                    output,
+                                                    outputVolDef);
+
+ cleanup:
+    virJSONValueFree(rscDefFilterArr);
+    return ret;
+}
+
+
+/**
+ * @brief virStorageBackendLinstorGetRscGrpPools
+ * Retrieves the set storage pools used in resource group.
+ * On success caller is responsible to free the virJSONValuePtr.
+ * @param rscgrpname resource group name to get the storage pools
+ * @param storagePoolsOut virJSONArray with used storage pools
+ * @return -1 on error, 0 on success
+ */
+static int
+virStorageBackendLinstorGetRscGrpPools(const char* rscgrpname, virJSONValuePtr *storagePoolsOut)
+{
+    g_autofree char *outputRscGrp = NULL;
+    g_autoptr(virCommand) cmdRscGrpList = NULL;
+
+    /* "resource-group list -r <name>" restricts the output to this group */
+    cmdRscGrpList = virStorageBackendLinstorPrepLinstorCmd(true);
+    virCommandAddArgList(cmdRscGrpList, "resource-group", "list", "-r", rscgrpname, NULL);
+    virCommandSetOutputBuffer(cmdRscGrpList, &outputRscGrp);
+    if (virCommandRun(cmdRscGrpList, NULL) < 0)
+        return -1;
+
+    /* parser allocates *storagePoolsOut from the JSON output */
+    if (virStorageBackendLinstorParseResourceGroupList(rscgrpname,
+                                                       outputRscGrp,
+                                                       storagePoolsOut)) {
+        return -1;
+    }
+
+    return 0;
+}
+
+
+/**
+ * @brief virStorageBackendLinstorRefreshPool
+ * Refresh the pool's capacity/allocation from the LINSTOR storage pools
+ * backing the pool's resource group and then re-list all volumes.
+ * @param pool storage pool object to refresh
+ * @return -1 on error, 0 on success
+ */
+static int
+virStorageBackendLinstorRefreshPool(virStoragePoolObjPtr pool)
+{
+    size_t i;
+    g_autofree char *outputStorPoolList = NULL;
+    g_autofree char *nodename = NULL;
+    g_autoptr(virCommand) cmdStorPoolList = NULL;
+    virJSONValuePtr storagePoolArr = NULL;
+    virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
+
+    if (virStorageBackendLinstorGetNodeName(pool, &nodename))
+        return -1;
+
+    if (virStorageBackendLinstorGetRscGrpPools(def->source.name, &storagePoolArr))
+        return -1;
+
+    /* Get storage pools used in the used resource group */
+    cmdStorPoolList = virStorageBackendLinstorPrepLinstorCmd(true);
+    virCommandAddArgList(cmdStorPoolList, "storage-pool", "list", "-n", nodename, NULL);
+
+    /* restrict the query to the group's storage pools, if any are known */
+    if (storagePoolArr != NULL && virJSONValueArraySize(storagePoolArr) > 0) {
+        virCommandAddArgList(cmdStorPoolList, "-s", NULL);
+        for (i = 0; i < virJSONValueArraySize(storagePoolArr); i++) {
+            virCommandAddArg(cmdStorPoolList,
+                             virJSONValueGetString(virJSONValueArrayGet(storagePoolArr, i)));
+        }
+    }
+
+    /* free unconditionally: freeing only inside the branch above leaked
+     * the array whenever it was non-NULL but empty */
+    virJSONValueFree(storagePoolArr);
+
+    virCommandSetOutputBuffer(cmdStorPoolList, &outputStorPoolList);
+    if (virCommandRun(cmdStorPoolList, NULL) < 0)
+        return -1;
+
+    /* update capacity and allocated from used storage pools */
+    if (virStorageBackendLinstorParseStoragePoolList(virStoragePoolObjGetDef(pool),
+                                                     nodename,
+                                                     outputStorPoolList) < 0)
+        return -1;
+
+    /* Get volumes used in the resource group and add */
+    return virStorageBackendLinstorRefreshAllVol(pool);
+}
+
+/**
+ * @brief virStorageBackendLinstorCreateVol
+ * Create a volume by spawning a resource from the pool's resource group.
+ * Only raw, unencrypted volumes with an explicit capacity are supported.
+ * @param pool storage pool to create the volume in
+ * @param vol volume definition (name, capacity, format)
+ * @return -1 on error, 0 on success
+ */
+static int
+virStorageBackendLinstorCreateVol(virStoragePoolObjPtr pool,
+                                  virStorageVolDefPtr vol)
+{
+    virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
+    g_autoptr(virCommand) cmdRscGrp = NULL;
+
+    VIR_DEBUG("Creating Linstor image %s/%s with size %llu",
+              def->source.name, vol->name, vol->target.capacity);
+
+    if (!vol->target.capacity) {
+        virReportError(VIR_ERR_NO_SUPPORT, "%s",
+                       _("volume capacity required for this storage pool"));
+        return -1;
+    }
+
+    if (vol->target.format != VIR_STORAGE_FILE_RAW) {
+        virReportError(VIR_ERR_NO_SUPPORT, "%s",
+                       _("only RAW volumes are supported by this storage pool"));
+        return -1;
+    }
+
+    if (vol->target.encryption != NULL) {
+        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                       _("storage pool does not support encrypted volumes"));
+        return -1;
+    }
+
+    /* spawn resource */
+    cmdRscGrp = virStorageBackendLinstorPrepLinstorCmd(false);
+    virCommandAddArgList(cmdRscGrp, "resource-group", "spawn",
+                         "--partial", def->source.name, vol->name, NULL);
+    /* capacity is in bytes; LINSTOR takes KiB */
+    virCommandAddArgFormat(cmdRscGrp, "%lluKiB", vol->target.capacity / 1024);
+    if (virCommandRun(cmdRscGrp, NULL) < 0)
+        return -1;
+
+    /* set volume path and key */
+    /* we could skip getting the capacity as we already know it */
+    return virStorageBackendLinstorRefreshVol(pool, vol);
+}
+
+
+/**
+ * @brief virStorageBackendLinstorBuildVolFrom
+ * Build @vol from @inputvol by dispatching to the generic build function
+ * selected for this input/output volume combination.
+ * @return -1 if no build function is available or on build failure
+ */
+static int
+virStorageBackendLinstorBuildVolFrom(virStoragePoolObjPtr pool,
+                                     virStorageVolDefPtr vol,
+                                     virStorageVolDefPtr inputvol,
+                                     unsigned int flags)
+{
+    virStorageBackendBuildVolFrom build_func;
+
+    build_func = virStorageBackendGetBuildVolFromFunction(vol, inputvol);
+    if (!build_func)
+        return -1;
+
+    return build_func(pool, vol, inputvol, flags);
+}
+
+
+/**
+ * @brief virStorageBackendLinstorDeleteVol
+ * Delete a volume by removing its LINSTOR resource definition
+ * (presumably this removes the resource on every node — TODO confirm
+ * against LINSTOR CLI semantics).
+ * @param flags must be 0
+ * @return -1 on error, 0 on success
+ */
+static int
+virStorageBackendLinstorDeleteVol(virStoragePoolObjPtr pool,
+                                  virStorageVolDefPtr vol,
+                                  unsigned int flags)
+{
+    g_autoptr(virCommand) cmd = NULL;
+
+    (void)pool;
+    virCheckFlags(0, -1);
+
+    cmd = virStorageBackendLinstorPrepLinstorCmd(false);
+    virCommandAddArgList(cmd, "resource-definition", "delete", vol->name, NULL);
+    return virCommandRun(cmd, NULL);
+}
+
+
+/**
+ * @brief virStorageBackendLinstorResizeVol
+ * Resize a volume via "volume-definition set-size".
+ * @param capacity new capacity in bytes (converted to KiB for LINSTOR)
+ * @param flags must be 0
+ * @return -1 on error, 0 on success
+ */
+static int
+virStorageBackendLinstorResizeVol(virStoragePoolObjPtr pool,
+                                  virStorageVolDefPtr vol,
+                                  unsigned long long capacity,
+                                  unsigned int flags)
+{
+    g_autoptr(virCommand) cmd = NULL;
+
+    (void)pool;
+    virCheckFlags(0, -1);
+
+    /* "0" is the volume number — assumes a single-volume resource as
+     * created by CreateVol; TODO confirm for externally created resources */
+    cmd = virStorageBackendLinstorPrepLinstorCmd(false);
+    virCommandAddArgList(cmd, "volume-definition", "set-size", vol->name, "0", NULL);
+    virCommandAddArgFormat(cmd, "%lluKiB", capacity / 1024);
+    return virCommandRun(cmd, NULL);
+}
+
+
+/**
+ * @brief virStorageBackendLinstorCheck
+ * Check if we can connect to a Linstor-Controller.
+ * @param isActive set to true when the controller answered, false otherwise
+ * @return 0 (an unreachable controller is not treated as an error)
+ */
+static int
+virStorageBackendLinstorCheck(virStoragePoolObjPtr pool,
+                              bool *isActive)
+{
+    g_autoptr(virCommand) cmd = NULL;
+
+    (void)pool;
+
+    /* This command gets the controller version */
+    cmd = virStorageBackendLinstorPrepLinstorCmd(false);
+    virCommandAddArgList(cmd, "controller", "version", NULL);
+    if (virCommandRun(cmd, NULL) < 0) {
+        /* previous code fell through and overwrote this with true */
+        *isActive = false;
+        return 0;
+    }
+
+    *isActive = true;
+    return 0;
+}
+
+/* Callback table registering the LINSTOR backend with the storage driver.
+ * buildVol is intentionally NULL; upload/download/wipe reuse the generic
+ * local-file implementations since volumes appear as local block devices. */
+virStorageBackend virStorageBackendLinstor = {
+    .type = VIR_STORAGE_POOL_LINSTOR,
+
+    .refreshPool = virStorageBackendLinstorRefreshPool,
+    .checkPool = virStorageBackendLinstorCheck,
+    .createVol = virStorageBackendLinstorCreateVol,
+    .buildVol = NULL,
+    .buildVolFrom = virStorageBackendLinstorBuildVolFrom,
+    .refreshVol = virStorageBackendLinstorRefreshVol,
+    .deleteVol = virStorageBackendLinstorDeleteVol,
+    .resizeVol = virStorageBackendLinstorResizeVol,
+    .uploadVol = virStorageBackendVolUploadLocal,
+    .downloadVol = virStorageBackendVolDownloadLocal,
+    .wipeVol = virStorageBackendVolWipeLocal,
+};
+
+
+/* Register the LINSTOR backend; called from storage driver initialization. */
+int
+virStorageBackendLinstorRegister(void)
+{
+    return virStorageBackendRegister(&virStorageBackendLinstor);
+}
diff --git a/src/storage/storage_backend_linstor.h b/src/storage/storage_backend_linstor.h
new file mode 100644
index 0000000000..72750fd278
--- /dev/null
+++ b/src/storage/storage_backend_linstor.h
@@ -0,0 +1,23 @@
+/*
+ * storage_backend_linstor.h: storage backend for LINSTOR handling
+ *
+ * Copyright (C) 2020-2021 Rene Peinthor
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+int virStorageBackendLinstorRegister(void);
diff --git a/src/storage/storage_backend_linstor_priv.h b/src/storage/storage_backend_linstor_priv.h
new file mode 100644
index 0000000000..36503993b8
--- /dev/null
+++ b/src/storage/storage_backend_linstor_priv.h
@@ -0,0 +1,53 @@
+/*
+ * storage_backend_linstor_priv.h: header for functions necessary in tests
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LIBVIRT_STORAGE_BACKEND_LINSTOR_PRIV_H_ALLOW
+# error "storage_backend_linstor_priv.h may only be included by storage_backend_linstor.c or test suites"
+#endif /* LIBVIRT_STORAGE_BACKEND_LINSTOR_PRIV_H_ALLOW */
+
+#pragma once
+
+#include "virjson.h"
+#include "virstorageobj.h"
+#include "conf/storage_conf.h"
+
+int
+virStorageBackendLinstorFilterRscDefsForRscGroup(const char *resourceGroup,
+ const char *output,
+ virJSONValuePtr rscDefArrayOut);
+
+int
+virStorageBackendLinstorParseResourceGroupList(const char *resourceGroup,
+ const char *output,
+ virJSONValuePtr *storPoolArrayOut);
+
+int
+virStorageBackendLinstorParseStoragePoolList(virStoragePoolDefPtr pool,
+ const char* nodeName,
+ const char *output);
+
+int
+virStorageBackendLinstorParseResourceList(virStoragePoolObjPtr pool,
+ const char* nodeName,
+ virJSONValuePtr rscDefFilterArr,
+ const char *outputRscList,
+ const char *outputVolDef);
+
+int
+virStorageBackendLinstorParseVolumeDefinition(virStorageVolDefPtr vol,
+ const char *output);
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
index 16bc53aa46..63f860a963 100644
--- a/src/storage/storage_driver.c
+++ b/src/storage/storage_driver.c
@@ -1647,6 +1647,7 @@ storageVolLookupByPathCallback(virStoragePoolObjPtr obj,
case VIR_STORAGE_POOL_RBD:
case VIR_STORAGE_POOL_SHEEPDOG:
case VIR_STORAGE_POOL_ZFS:
+ case VIR_STORAGE_POOL_LINSTOR:
case VIR_STORAGE_POOL_LAST:
stable_path = g_strdup(data->path);
break;
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
index 29c4c86b1d..b03ede39d0 100644
--- a/src/test/test_driver.c
+++ b/src/test/test_driver.c
@@ -7103,6 +7103,7 @@ testStorageVolumeTypeForPool(int pooltype)
case VIR_STORAGE_POOL_ISCSI:
case VIR_STORAGE_POOL_SCSI:
case VIR_STORAGE_POOL_ZFS:
+ case VIR_STORAGE_POOL_LINSTOR:
return VIR_STORAGE_VOL_BLOCK;
case VIR_STORAGE_POOL_LAST:
default:
diff --git a/tests/linstorjsondata/broken.json b/tests/linstorjsondata/broken.json
new file mode 100644
index 0000000000..bce5bde3c6
--- /dev/null
+++ b/tests/linstorjsondata/broken.json
@@ -0,0 +1 @@
+[[{"name":"DfltRscGrp","select_filter":{"place_count":2},id":"a52e934a-9fd9-44cb-9db1-716dcd13aae3"},{"name":"libvirtgrp","select_filter":{"place_count":2,"storage_pool":"thinpool","storage_pool_list":["thinpool"]},"uuid":"7ec0bdee-9176-470e-8f7d-532032434160"}]]
diff --git a/tests/linstorjsondata/resource-group.json b/tests/linstorjsondata/resource-group.json
new file mode 100644
index 0000000000..3a2f959ad7
--- /dev/null
+++ b/tests/linstorjsondata/resource-group.json
@@ -0,0 +1 @@
+[[{"name":"DfltRscGrp","select_filter":{"place_count":2},"uuid":"a52e934a-9fd9-44cb-9db1-716dcd13aae3"},{"name":"libvirtgrp","select_filter":{"place_count":2,"storage_pool":"thinpool","storage_pool_list":["thinpool"]},"uuid":"7ec0bdee-9176-470e-8f7d-532032434160"}]]
diff --git a/tests/linstorjsondata/resource-list-test2.json b/tests/linstorjsondata/resource-list-test2.json
new file mode 100644
index 0000000000..86e7f04d82
--- /dev/null
+++ b/tests/linstorjsondata/resource-list-test2.json
@@ -0,0 +1,332 @@
+[
+ [
+ {
+ "name": "test2",
+ "node_name": "linstor1",
+ "props": {
+ "StorPoolName": "thinpool"
+ },
+ "layer_object": {
+ "children": [
+ {
+ "type": "STORAGE",
+ "storage": {
+ "storage_volumes": [
+ {
+ "volume_number": 0,
+ "device_path": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 106496,
+ "usable_size_kib": 106496,
+ "disk_state": "[]"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "DRBD",
+ "drbd": {
+ "drbd_resource_definition": {
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_stripe_size_kib": 32,
+ "port": 7001,
+ "transport_type": "IP",
+ "secret": "2vD4CZEbaEAO3XIZ/ICv",
+ "down": false
+ },
+ "node_id": 0,
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_size": 32,
+ "drbd_volumes": [
+ {
+ "drbd_volume_definition": {
+ "volume_number": 0,
+ "minor_number": 1001
+ },
+ "device_path": "/dev/drbd1001",
+ "backing_device": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 102460,
+ "usable_size_kib": 102400
+ }
+ ],
+ "connections": {
+ "linstor2": {
+ "connected": true,
+ "message": "Connected"
+ },
+ "linstor3": {
+ "connected": true,
+ "message": "Connected"
+ }
+ },
+ "promotion_score": 10102,
+ "may_promote": true
+ }
+ },
+ "state": {
+ "in_use": false
+ },
+ "uuid": "a567d642-02ab-4dd3-8183-a726b20aa9d9",
+ "create_timestamp": 1606836534107,
+ "volumes": [
+ {
+ "volume_number": 0,
+ "storage_pool_name": "thinpool",
+ "provider_kind": "LVM_THIN",
+ "device_path": "/dev/drbd1001",
+ "allocated_size_kib": 63,
+ "state": {
+ "disk_state": "UpToDate"
+ },
+ "layer_data_list": [
+ {
+ "type": "DRBD",
+ "data": {
+ "drbd_volume_definition": {
+ "volume_number": 0,
+ "minor_number": 1001
+ },
+ "device_path": "/dev/drbd1001",
+ "backing_device": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 102460,
+ "usable_size_kib": 102400
+ }
+ },
+ {
+ "type": "STORAGE",
+ "data": {
+ "volume_number": 0,
+ "device_path": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 106496,
+ "usable_size_kib": 106496,
+ "disk_state": "[]"
+ }
+ }
+ ],
+ "uuid": "2e4b6876-c6a0-4df9-8283-5633fce67dee"
+ }
+ ]
+ },
+ {
+ "name": "test2",
+ "node_name": "linstor2",
+ "props": {
+ "StorPoolName": "DfltDisklessStorPool"
+ },
+ "flags": [
+ "DISKLESS",
+ "DRBD_DISKLESS",
+ "TIE_BREAKER"
+ ],
+ "layer_object": {
+ "children": [
+ {
+ "type": "STORAGE",
+ "storage": {
+ "storage_volumes": [
+ {
+ "volume_number": 0,
+ "allocated_size_kib": 0,
+ "usable_size_kib": 102400
+ }
+ ]
+ }
+ }
+ ],
+ "type": "DRBD",
+ "drbd": {
+ "drbd_resource_definition": {
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_stripe_size_kib": 32,
+ "port": 7001,
+ "transport_type": "IP",
+ "secret": "2vD4CZEbaEAO3XIZ/ICv",
+ "down": false
+ },
+ "node_id": 2,
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_size": 32,
+ "flags": [
+ "DISKLESS",
+ "INITIALIZED"
+ ],
+ "drbd_volumes": [
+ {
+ "drbd_volume_definition": {
+ "volume_number": 0,
+ "minor_number": 1001
+ },
+ "device_path": "/dev/drbd1001",
+ "allocated_size_kib": -1,
+ "usable_size_kib": 102400
+ }
+ ],
+ "connections": {
+ "linstor1": {
+ "connected": true,
+ "message": "Connected"
+ },
+ "linstor3": {
+ "connected": true,
+ "message": "Connected"
+ }
+ },
+ "promotion_score": 2,
+ "may_promote": false
+ }
+ },
+ "state": {
+ "in_use": false
+ },
+ "uuid": "57696dfa-4e9e-4a95-93bb-787e8a34fe42",
+ "create_timestamp": 1606836534944,
+ "volumes": [
+ {
+ "volume_number": 0,
+ "storage_pool_name": "DfltDisklessStorPool",
+ "provider_kind": "DISKLESS",
+ "device_path": "/dev/drbd1001",
+ "allocated_size_kib": 0,
+ "state": {
+ "disk_state": "Diskless"
+ },
+ "layer_data_list": [
+ {
+ "type": "DRBD",
+ "data": {
+ "drbd_volume_definition": {
+ "volume_number": 0,
+ "minor_number": 1001
+ },
+ "device_path": "/dev/drbd1001",
+ "allocated_size_kib": -1,
+ "usable_size_kib": 102400
+ }
+ },
+ {
+ "type": "STORAGE",
+ "data": {
+ "volume_number": 0,
+ "allocated_size_kib": 0,
+ "usable_size_kib": 102400
+ }
+ }
+ ],
+ "uuid": "eb058821-f1f9-4d4e-8f89-c33839b71a6b"
+ }
+ ]
+ },
+ {
+ "name": "test2",
+ "node_name": "linstor3",
+ "props": {
+ "AutoSelectedStorPoolName": "thinpool",
+ "StorPoolName": "thinpool"
+ },
+ "layer_object": {
+ "children": [
+ {
+ "type": "STORAGE",
+ "storage": {
+ "storage_volumes": [
+ {
+ "volume_number": 0,
+ "device_path": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 106496,
+ "usable_size_kib": 106496,
+ "disk_state": "[]"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "DRBD",
+ "drbd": {
+ "drbd_resource_definition": {
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_stripe_size_kib": 32,
+ "port": 7001,
+ "transport_type": "IP",
+ "secret": "2vD4CZEbaEAO3XIZ/ICv",
+ "down": false
+ },
+ "node_id": 1,
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_size": 32,
+ "drbd_volumes": [
+ {
+ "drbd_volume_definition": {
+ "volume_number": 0,
+ "minor_number": 1001
+ },
+ "device_path": "/dev/drbd1001",
+ "backing_device": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 102460,
+ "usable_size_kib": 102400
+ }
+ ],
+ "connections": {
+ "linstor2": {
+ "connected": true,
+ "message": "Connected"
+ },
+ "linstor1": {
+ "connected": true,
+ "message": "Connected"
+ }
+ },
+ "promotion_score": 10102,
+ "may_promote": true
+ }
+ },
+ "state": {
+ "in_use": false
+ },
+ "uuid": "f2b1885d-c6e9-4878-9e0a-32d9939e7e73",
+ "create_timestamp": 1606836535621,
+ "volumes": [
+ {
+ "volume_number": 0,
+ "storage_pool_name": "thinpool",
+ "provider_kind": "LVM_THIN",
+ "device_path": "/dev/drbd1001",
+ "allocated_size_kib": 63,
+ "state": {
+ "disk_state": "UpToDate"
+ },
+ "layer_data_list": [
+ {
+ "type": "DRBD",
+ "data": {
+ "drbd_volume_definition": {
+ "volume_number": 0,
+ "minor_number": 1001
+ },
+ "device_path": "/dev/drbd1001",
+ "backing_device": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 102460,
+ "usable_size_kib": 102400
+ }
+ },
+ {
+ "type": "STORAGE",
+ "data": {
+ "volume_number": 0,
+ "device_path": "/dev/scratch/test2_00000",
+ "allocated_size_kib": 106496,
+ "usable_size_kib": 106496,
+ "disk_state": "[]"
+ }
+ }
+ ],
+ "uuid": "c1911c4e-8650-4743-bbc1-5000b133f7a5"
+ }
+ ]
+ }
+ ]
+]
diff --git a/tests/linstorjsondata/storage-pools-ssdpool.json b/tests/linstorjsondata/storage-pools-ssdpool.json
new file mode 100644
index 0000000000..4f83fcb96f
--- /dev/null
+++ b/tests/linstorjsondata/storage-pools-ssdpool.json
@@ -0,0 +1,72 @@
+[
+ [
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "kitfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2520332505,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "kitfox:ssdpool",
+ "uuid": "0ec1c14b-684f-432d-83c4-06c093fd5008",
+ "supports_snapshots": true
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "redfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2263879729,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "redfox:ssdpool",
+ "uuid": "ce5cadae-0138-4ce7-8294-9c2dd648ff2c",
+ "supports_snapshots": true
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "silverfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2541678516,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "silverfox:ssdpool",
+ "uuid": "1042ae20-ec3d-494a-8188-0032df0775a2",
+ "supports_snapshots": true
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "swiftfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2682983094,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "swiftfox:ssdpool",
+ "uuid": "38c8220f-b0c1-4977-8b3f-0376bc5f9ee2",
+ "supports_snapshots": true
+ }
+ ]
+]
diff --git a/tests/linstorjsondata/storage-pools.json b/tests/linstorjsondata/storage-pools.json
new file mode 100644
index 0000000000..446ae05ba9
--- /dev/null
+++ b/tests/linstorjsondata/storage-pools.json
@@ -0,0 +1,192 @@
+[
+ [
+ {
+ "storage_pool_name": "DfltDisklessStorPool",
+ "node_name": "kitfox",
+ "provider_kind": "DISKLESS",
+ "static_traits": {
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 9223372036854775807,
+ "total_capacity": 9223372036854775807,
+ "free_space_mgr_name": "kitfox:DfltDisklessStorPool",
+ "uuid": "d43400aa-cec3-4517-9eac-e188821dd537",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "DfltDisklessStorPool",
+ "node_name": "redfox",
+ "provider_kind": "DISKLESS",
+ "static_traits": {
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 9223372036854775807,
+ "total_capacity": 9223372036854775807,
+ "free_space_mgr_name": "redfox:DfltDisklessStorPool",
+ "uuid": "227873ed-422b-4200-a01b-13050218d67b",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "DfltDisklessStorPool",
+ "node_name": "silverfox",
+ "provider_kind": "DISKLESS",
+ "static_traits": {
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 9223372036854775807,
+ "total_capacity": 9223372036854775807,
+ "free_space_mgr_name": "silverfox:DfltDisklessStorPool",
+ "uuid": "62540ab6-8eea-4283-8e96-338ece66bf68",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "DfltDisklessStorPool",
+ "node_name": "swiftfox",
+ "provider_kind": "DISKLESS",
+ "static_traits": {
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 9223372036854775807,
+ "total_capacity": 9223372036854775807,
+ "free_space_mgr_name": "swiftfox:DfltDisklessStorPool",
+ "uuid": "0299ef6d-4a72-49c6-bcb1-681875b9da51",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "hddpool",
+ "node_name": "kitfox",
+ "provider_kind": "LVM",
+ "props": {
+ "StorDriver/StorPoolName": "hddpool"
+ },
+ "static_traits": {
+ "Provisioning": "Fat",
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 46365757440,
+ "total_capacity": 46884159488,
+ "free_space_mgr_name": "kitfox:hddpool",
+ "uuid": "896402d9-a128-45b2-9a22-73a75ff02cf8",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "hddpool",
+ "node_name": "redfox",
+ "provider_kind": "LVM",
+ "props": {
+ "StorDriver/StorPoolName": "hddpool"
+ },
+ "static_traits": {
+ "Provisioning": "Fat",
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 44282089472,
+ "total_capacity": 46884159488,
+ "free_space_mgr_name": "redfox:hddpool",
+ "uuid": "8b1ba3b9-a202-4d79-bc1e-bd0c4b44d700",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "hddpool",
+ "node_name": "silverfox",
+ "provider_kind": "LVM",
+ "props": {
+ "StorDriver/StorPoolName": "hddpool"
+ },
+ "static_traits": {
+ "Provisioning": "Fat",
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 46346166272,
+ "total_capacity": 46884159488,
+ "free_space_mgr_name": "silverfox:hddpool",
+ "uuid": "0425df26-da1f-4980-af55-1117b7671b28",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "hddpool",
+ "node_name": "swiftfox",
+ "provider_kind": "LVM",
+ "props": {
+ "StorDriver/StorPoolName": "hddpool"
+ },
+ "static_traits": {
+ "Provisioning": "Fat",
+ "SupportsSnapshots": "false"
+ },
+ "free_capacity": 44498202624,
+ "total_capacity": 46884159488,
+ "free_space_mgr_name": "swiftfox:hddpool",
+ "uuid": "5be30898-c8fa-4bea-a54e-99062d43ab30",
+ "supports_snapshots": false
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "kitfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2520332505,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "kitfox:ssdpool",
+ "uuid": "0ec1c14b-684f-432d-83c4-06c093fd5008",
+ "supports_snapshots": true
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "redfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2263879729,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "redfox:ssdpool",
+ "uuid": "ce5cadae-0138-4ce7-8294-9c2dd648ff2c",
+ "supports_snapshots": true
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "silverfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2541678516,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "silverfox:ssdpool",
+ "uuid": "1042ae20-ec3d-494a-8188-0032df0775a2",
+ "supports_snapshots": true
+ },
+ {
+ "storage_pool_name": "ssdpool",
+ "node_name": "swiftfox",
+ "provider_kind": "LVM_THIN",
+ "props": {
+ "StorDriver/StorPoolName": "ssdpool/ssdthin"
+ },
+ "static_traits": {
+ "Provisioning": "Thin",
+ "SupportsSnapshots": "true"
+ },
+ "free_capacity": 2682983094,
+ "total_capacity": 3006480384,
+ "free_space_mgr_name": "swiftfox:ssdpool",
+ "uuid": "38c8220f-b0c1-4977-8b3f-0376bc5f9ee2",
+ "supports_snapshots": true
+ }
+ ]
+]
diff --git a/tests/linstorjsondata/volume-def-list.json b/tests/linstorjsondata/volume-def-list.json
new file mode 100644
index 0000000000..130d258395
--- /dev/null
+++ b/tests/linstorjsondata/volume-def-list.json
@@ -0,0 +1,158 @@
+[
+ [
+ {
+ "name": "test",
+ "props": {
+ "DrbdPrimarySetOn": "LINSTOR2",
+ "DrbdOptions/Handlers/quorum-lost": "/usr/bin/false",
+ "DrbdOptions/Resource/on-no-quorum": "io-error",
+ "DrbdOptions/Resource/quorum": "majority"
+ },
+ "layer_data": [
+ {
+ "type": "DRBD",
+ "data": {
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_stripe_size_kib": 32,
+ "port": 7000,
+ "transport_type": "IP",
+ "secret": "nho9cGTqxHSpsL3PVRlh",
+ "down": false
+ }
+ },
+ {
+ "type": "STORAGE"
+ }
+ ],
+ "uuid": "8a6f32cd-05c7-4ab0-b1e4-28ae7bd9716c",
+ "resource_group_name": "DfltRscGrp",
+ "volume_definitions": [
+ {
+ "volume_number": 0,
+ "size_kib": 102400,
+ "props": {
+ "DrbdCurrentGi": "8D2C5D7B9EFDE716",
+ "DrbdOptions/Disk/discard-zeroes-if-aligned": "yes",
+ "DrbdOptions/Disk/rs-discard-granularity": "65536"
+ },
+ "layer_data": [
+ {
+ "type": "DRBD",
+ "data": {
+ "volume_number": 0,
+ "minor_number": 1000
+ }
+ },
+ {
+ "type": "STORAGE"
+ }
+ ],
+ "uuid": "d5711c63-4aa8-4d01-97a6-982129fb757a"
+ }
+ ]
+ },
+ {
+ "name": "test2",
+ "props": {
+ "DrbdPrimarySetOn": "LINSTOR1",
+ "DrbdOptions/Resource/on-no-quorum": "io-error",
+ "DrbdOptions/Resource/quorum": "majority"
+ },
+ "layer_data": [
+ {
+ "type": "DRBD",
+ "data": {
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_stripe_size_kib": 32,
+ "port": 7001,
+ "transport_type": "IP",
+ "secret": "2vD4CZEbaEAO3XIZ/ICv",
+ "down": false
+ }
+ },
+ {
+ "type": "STORAGE"
+ }
+ ],
+ "uuid": "1b0b86a3-eba1-4453-b8ac-b8389f1ef732",
+ "resource_group_name": "libvirtgrp",
+ "volume_definitions": [
+ {
+ "volume_number": 0,
+ "size_kib": 102400,
+ "props": {
+ "DrbdCurrentGi": "65E85E9CE441095A",
+ "DrbdOptions/Disk/discard-zeroes-if-aligned": "yes",
+ "DrbdOptions/Disk/rs-discard-granularity": "65536"
+ },
+ "layer_data": [
+ {
+ "type": "DRBD",
+ "data": {
+ "volume_number": 0,
+ "minor_number": 1001
+ }
+ },
+ {
+ "type": "STORAGE"
+ }
+ ],
+ "uuid": "3d859e23-db34-4b29-a1d9-7237dca34b48"
+ }
+ ]
+ },
+ {
+ "name": "test3",
+ "props": {
+ "DrbdPrimarySetOn": "LINSTOR2",
+ "DrbdOptions/Resource/on-no-quorum": "io-error",
+ "DrbdOptions/Resource/quorum": "majority"
+ },
+ "layer_data": [
+ {
+ "type": "DRBD",
+ "data": {
+ "peer_slots": 7,
+ "al_stripes": 1,
+ "al_stripe_size_kib": 32,
+ "port": 7002,
+ "transport_type": "IP",
+ "secret": "YfrSh5eLhZStJ0JHFaVx",
+ "down": false
+ }
+ },
+ {
+ "type": "STORAGE"
+ }
+ ],
+ "uuid": "1d14069d-2a17-4865-be9f-73be985c029e",
+ "resource_group_name": "libvirtgrp",
+ "volume_definitions": [
+ {
+ "volume_number": 0,
+ "size_kib": 102400,
+ "props": {
+ "DrbdCurrentGi": "57D5597EEFCA7CCE",
+ "DrbdOptions/Disk/discard-zeroes-if-aligned": "yes",
+ "DrbdOptions/Disk/rs-discard-granularity": "65536"
+ },
+ "layer_data": [
+ {
+ "type": "DRBD",
+ "data": {
+ "volume_number": 0,
+ "minor_number": 1002
+ }
+ },
+ {
+ "type": "STORAGE"
+ }
+ ],
+ "uuid": "f57e6b5f-e516-4a7d-ba3a-24af5cad0428"
+ }
+ ]
+ }
+ ]
+]
diff --git a/tests/linstorjsondata/volume-definition-test2.json b/tests/linstorjsondata/volume-definition-test2.json
new file mode 100644
index 0000000000..cf9b8d3655
--- /dev/null
+++ b/tests/linstorjsondata/volume-definition-test2.json
@@ -0,0 +1 @@
+[[{"name":"test2","props":{"DrbdPrimarySetOn":"LINSTOR1","DrbdOptions/Resource/on-no-quorum":"io-error","DrbdOptions/Resource/quorum":"majority"},"layer_data":[{"type":"DRBD","data":{"peer_slots":7,"al_stripes":1,"al_stripe_size_kib":32,"port":7001,"transport_type":"IP","secret":"N5JDi3hRpTqPnIz/uioJ","down":false}},{"type":"STORAGE"}],"uuid":"fe7f23be-7f1f-4d31-b335-bcde071d5b99","resource_group_name":"DfltRscGrp","volume_definitions":[{"volume_number":0,"size_kib":102400,"props":{"DrbdCurrentGi":"7A18B00623CC057A","DrbdOptions/Disk/discard-zeroes-if-aligned":"yes","DrbdOptions/Disk/rs-discard-granularity":"65536"},"layer_data":[{"type":"DRBD","data":{"volume_number":0,"minor_number":1001}},{"type":"STORAGE"}],"uuid":"984e57bd-adec-464f-b9a3-ce0a10ae2711"}]}]]
diff --git a/tests/meson.build b/tests/meson.build
index f1d91ca50d..616e904b26 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -519,6 +519,12 @@ if conf.has('WITH_STORAGE_SHEEPDOG')
]
endif
+if conf.has('WITH_STORAGE_LINSTOR')
+ tests += [
+ { 'name': 'storagebackendlinstortest', 'link_with': [ storage_driver_impl_lib, storage_backend_linstor_priv_lib ] },
+ ]
+endif
+
if conf.has('WITH_VBOX')
tests += [
{ 'name': 'vboxsnapshotxmltest', 'link_with': [ vbox_driver_impl ] },
diff --git a/tests/storagebackendlinstortest.c b/tests/storagebackendlinstortest.c
new file mode 100644
index 0000000000..6c35bac2e9
--- /dev/null
+++ b/tests/storagebackendlinstortest.c
@@ -0,0 +1,371 @@
+/*
+ * storagebackendlinstortest.c: test for linstor storage backend
+ *
+ * Copyright (C) 2020-2021 Rene Peinthor
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <config.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdio.h>
+
+#include "internal.h"
+#include "testutils.h"
+#define LIBVIRT_STORAGE_BACKEND_LINSTOR_PRIV_H_ALLOW
+#include "storage/storage_backend_linstor_priv.h"
+
+#define VIR_FROM_THIS VIR_FROM_NONE
+
+struct testStoragePoolListParserData {
+ const char *input_json;
+ const char *poolxml;
+ const char *node_name;
+ int expected_return;
+ uint64_t expected_capacity;
+ uint64_t expected_allocation;
+};
+
+struct testVolumeDefinitionListParserData {
+ const char *input_json;
+ const char *poolxml;
+ const char *volxml;
+ int expected_return;
+ uint64_t expected_capacity;
+};
+
+struct testResourceListParserData {
+ const char *rsclist_json;
+ const char *voldeflist_json;
+ const char *poolxml;
+ const char *noden_name;
+ const char *rsc_filter_json;
+ int expected_return;
+ size_t expected_volume_count;
+};
+
+struct testResourceGroupListParserData {
+ const char *input_json;
+ const char *poolxml;
+ const char *rsc_grp;
+ int expected_return;
+ const char *expected_storpools;
+};
+
+struct testResourceDeployedData {
+ const char *input_json;
+ const char *rscname;
+ const char *nodename;
+ int expected_return;
+};
+
+
+static int
+test_resourcegroup_list_parser(const void *opaque)
+{
+ const struct testResourceGroupListParserData *data = opaque;
+ g_autofree char *poolxml = NULL;
+ g_autoptr(virStoragePoolDef) pool = NULL;
+ g_autofree char *inputJson = NULL;
+ g_autofree char *indata = NULL;
+ virJSONValuePtr storagePoolList = NULL;
+
+ inputJson = g_strdup_printf("%s/linstorjsondata/%s",
+ abs_srcdir, data->input_json);
+
+ poolxml = g_strdup_printf("%s/storagepoolxml2xmlin/%s",
+ abs_srcdir, data->poolxml);
+
+ if (!(pool = virStoragePoolDefParseFile(poolxml))) {
+ return -1;
+ }
+
+ if (virTestLoadFile(inputJson, &indata) < 0)
+ return -1;
+
+ if (virStorageBackendLinstorParseResourceGroupList(data->rsc_grp,
+ indata,
+ &storagePoolList) != 0) {
+ virJSONValueFree(storagePoolList);
+ return -1;
+ }
+
+ if (storagePoolList == NULL) {
+ return -1;
+ }
+
+ if (g_strcmp0(virJSONValueToString(storagePoolList, false),
+ data->expected_storpools) != 0) {
+ virJSONValueFree(storagePoolList);
+ return -1;
+ }
+
+ virJSONValueFree(storagePoolList);
+ return 0;
+}
+
+
+static int
+run_test_resourcegroup_list_parser(void)
+{
+ int ret = 0;
+
+ struct testResourceGroupListParserData rscGrpTest[] = {
+ { "resource-group.json", "pool-linstor.xml", "libvirtgrp", 0, "[\"thinpool\"]" },
+ { NULL, NULL, NULL, 0, NULL }
+ };
+
+ /* volumedefinition list parse */
+ struct testResourceGroupListParserData *test = rscGrpTest;
+ {
+ while (test->input_json != NULL) {
+ if (virTestRun(
+ "resourcegroup_list_parser",
+ test_resourcegroup_list_parser, test) < 0)
+ ret = -1;
+ ++test;
+ }
+ }
+ return ret;
+}
+
+
+static int
+test_storagepool_list_parser(const void *opaque)
+{
+ const struct testStoragePoolListParserData *data = opaque;
+ g_autofree char *poolxml = NULL;
+ g_autoptr(virStoragePoolDef) pool = NULL;
+ g_autofree char *inputJson = NULL;
+ g_autofree char *indata = NULL;
+
+ inputJson = g_strdup_printf("%s/linstorjsondata/%s",
+ abs_srcdir, data->input_json);
+
+ poolxml = g_strdup_printf("%s/storagepoolxml2xmlin/%s",
+ abs_srcdir, data->poolxml);
+
+ if (!(pool = virStoragePoolDefParseFile(poolxml)))
+ return -1;
+
+ if (virTestLoadFile(inputJson, &indata) < 0)
+ return -1;
+
+ if (virStorageBackendLinstorParseStoragePoolList(pool, data->node_name, indata) !=
+ data->expected_return)
+ return -1;
+
+ if (data->expected_return)
+ return 0;
+
+ if (pool->capacity == data->expected_capacity &&
+ pool->allocation == data->expected_allocation)
+ return 0;
+
+ return -1;
+}
+
+
+static int
+run_test_storagepool_list_parser(void)
+{
+ int ret = 0;
+
+ struct testStoragePoolListParserData storPoolTest[] = {
+ { "storage-pools-ssdpool.json", "pool-linstor.xml", "redfox", 0, 3078635913216, 760423070720 },
+ { "storage-pools.json", "pool-linstor.xml", "silverfox", 0, 51088015228928, 1026862166016 },
+ { NULL, NULL, NULL, 0, 0, 0 }
+ };
+ /* volumedefinition list parse */
+ struct testStoragePoolListParserData *test = storPoolTest;
+ {
+ while (test->input_json != NULL) {
+ if (virTestRun(
+ "test_storagepool_list_parser",
+ test_storagepool_list_parser, test) < 0)
+ ret = -1;
+ ++test;
+ }
+ }
+ return ret;
+}
+
+
+static int
+test_volumedefinition_list_parser(const void *opaque)
+{
+ const struct testVolumeDefinitionListParserData *data = opaque;
+ g_autoptr(virStoragePoolDef) pool = NULL;
+ g_autoptr(virStorageVolDef) vol = NULL;
+ g_autofree char *poolxml = NULL;
+ g_autofree char *volxml = NULL;
+ g_autofree char *inputJson = NULL;
+ g_autofree char *indata = NULL;
+
+ inputJson = g_strdup_printf("%s/linstorjsondata/%s",
+ abs_srcdir, data->input_json);
+
+ poolxml = g_strdup_printf("%s/storagepoolxml2xmlin/%s",
+ abs_srcdir, data->poolxml);
+
+ volxml = g_strdup_printf("%s/storagevolxml2xmlin/%s",
+ abs_srcdir, data->volxml);
+
+ if (!(pool = virStoragePoolDefParseFile(poolxml)))
+ return -1;
+
+ if (!(vol = virStorageVolDefParseFile(pool, volxml, 0)))
+ return -1;
+
+ if (virTestLoadFile(inputJson, &indata) < 0)
+ return -1;
+
+ if (virStorageBackendLinstorParseVolumeDefinition(vol, indata) !=
+ data->expected_return)
+ return -1;
+
+ if (data->expected_return)
+ return 0;
+
+ if (vol->target.capacity == data->expected_capacity)
+ return 0;
+
+ return -1;
+}
+
+
+static int
+run_test_volumedefinition_list_parser(void)
+{
+ int ret = 0;
+
+ struct testVolumeDefinitionListParserData volumeDefTest[] = {
+ { "volume-definition-test2.json", "pool-linstor.xml", "vol-linstor.xml", 0, 104857600 },
+ { NULL, NULL, NULL, 0, 0 }
+ };
+ /* volumedefinition list parse */
+ struct testVolumeDefinitionListParserData *test = volumeDefTest;
+ {
+ while (test->input_json != NULL) {
+ if (virTestRun(
+ "volumedefinition_list_parser",
+ test_volumedefinition_list_parser, test) < 0)
+ ret = -1;
+ ++test;
+ }
+ }
+ return ret;
+}
+
+
+static int
+testResourceListParser(const void *opaque)
+{
+ int ret = -1;
+ const struct testResourceListParserData *data = opaque;
+ virStoragePoolObjPtr pool = NULL;
+ virStoragePoolDefPtr poolDef = NULL;
+ g_autofree char *poolxml = NULL;
+ g_autofree char *rscListJsonFile = NULL;
+ g_autofree char *volDefListJsonFile = NULL;
+ g_autofree char *rscListData = NULL;
+ g_autofree char *volDefListData = NULL;
+ virJSONValuePtr rscFilterArr = NULL;
+
+ rscListJsonFile = g_strdup_printf("%s/linstorjsondata/%s",
+ abs_srcdir, data->rsclist_json);
+
+ volDefListJsonFile = g_strdup_printf("%s/linstorjsondata/%s",
+ abs_srcdir, data->voldeflist_json);
+
+ poolxml = g_strdup_printf("%s/storagepoolxml2xmlin/%s",
+ abs_srcdir, data->poolxml);
+
+ rscFilterArr = virJSONValueFromString(data->rsc_filter_json);
+
+ if (!(poolDef = virStoragePoolDefParseFile(poolxml)))
+ goto cleanup;
+
+ if (!(pool = virStoragePoolObjNew()))
+ goto cleanup;
+
+ virStoragePoolObjSetDef(pool, poolDef);
+
+ if (virTestLoadFile(rscListJsonFile, &rscListData) < 0)
+ goto cleanup;
+
+ if (virTestLoadFile(volDefListJsonFile, &volDefListData) < 0)
+ goto cleanup;
+
+ if (virStorageBackendLinstorParseResourceList(pool,
+ data->noden_name,
+ rscFilterArr,
+ rscListData,
+ volDefListData) != data->expected_return)
+ goto cleanup;
+
+ if (data->expected_return) {
+ ret = 0;
+ goto cleanup;
+ }
+
+ if (data->expected_volume_count == virStoragePoolObjGetVolumesCount(pool))
+ ret = 0;
+
+ cleanup:
+ virStoragePoolObjEndAPI(&pool);
+ return ret;
+}
+
+static int
+runTestResourceListParser(void)
+{
+ int ret = 0;
+ struct testResourceListParserData rscListParseData[] = {
+ { "resource-list-test2.json", "volume-def-list.json", "pool-linstor.xml", "linstor1", "[\"test2\"]", 0, 1 },
+ { NULL, NULL, NULL, NULL, NULL, 0, 0}
+ };
+
+ struct testResourceListParserData *test = rscListParseData;
+ {
+ while (test->rsclist_json != NULL) {
+ if (virTestRun(
+ "virStorageBackendLinstorParseResourceList",
+ testResourceListParser, test) < 0)
+ ret = -1;
+ ++test;
+ }
+ }
+
+ return ret;
+}
+
+
+static int
+mymain(void)
+{
+ int ret = 0;
+
+ ret = run_test_resourcegroup_list_parser() ? -1 : ret;
+ ret = run_test_storagepool_list_parser() ? -1 : ret;
+ ret = run_test_volumedefinition_list_parser() ? -1 : ret;
+ ret = runTestResourceListParser() ? -1 : ret;
+
+ return ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+VIR_TEST_MAIN(mymain)
diff --git a/tests/storagepoolcapsschemadata/poolcaps-fs.xml b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
index eee75af746..2ddb18082e 100644
--- a/tests/storagepoolcapsschemadata/poolcaps-fs.xml
+++ b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
@@ -204,4 +204,11 @@
</enum>
</volOptions>
</pool>
+ <pool type='linstor' supported='no'>
+ <volOptions>
+ <defaultFormat type='raw'/>
+ <enum name='targetFormatType'>
+ </enum>
+ </volOptions>
+ </pool>
</storagepoolCapabilities>
diff --git a/tests/storagepoolcapsschemadata/poolcaps-full.xml b/tests/storagepoolcapsschemadata/poolcaps-full.xml
index 805950a937..cdce2d1ad8 100644
--- a/tests/storagepoolcapsschemadata/poolcaps-full.xml
+++ b/tests/storagepoolcapsschemadata/poolcaps-full.xml
@@ -204,4 +204,11 @@
</enum>
</volOptions>
</pool>
+ <pool type='linstor' supported='yes'>
+ <volOptions>
+ <defaultFormat type='raw'/>
+ <enum name='targetFormatType'>
+ </enum>
+ </volOptions>
+ </pool>
</storagepoolCapabilities>
diff --git a/tests/storagepoolxml2argvtest.c b/tests/storagepoolxml2argvtest.c
index 967d1f21a8..a632b07a00 100644
--- a/tests/storagepoolxml2argvtest.c
+++ b/tests/storagepoolxml2argvtest.c
@@ -68,6 +68,7 @@ testCompareXMLToArgvFiles(bool shouldFail,
case VIR_STORAGE_POOL_GLUSTER:
case VIR_STORAGE_POOL_ZFS:
case VIR_STORAGE_POOL_VSTORAGE:
+ case VIR_STORAGE_POOL_LINSTOR:
case VIR_STORAGE_POOL_LAST:
default:
VIR_TEST_DEBUG("pool type '%s' has no xml2argv test", defTypeStr);
diff --git a/tests/storagepoolxml2xmlin/pool-linstor.xml b/tests/storagepoolxml2xmlin/pool-linstor.xml
new file mode 100644
index 0000000000..36f2781e21
--- /dev/null
+++ b/tests/storagepoolxml2xmlin/pool-linstor.xml
@@ -0,0 +1,8 @@
+<pool type="linstor">
+ <source>
+ <name>DfltRscGrp</name>
+ <host name="linstor1"/>
+ </source>
+ <uuid>65fcba04-5b13-bd93-cff3-52ce48e11ad7</uuid>
+ <name>linstor</name>
+</pool>
diff --git a/tests/storagevolxml2xmlin/vol-linstor.xml b/tests/storagevolxml2xmlin/vol-linstor.xml
new file mode 100644
index 0000000000..7369f4f673
--- /dev/null
+++ b/tests/storagevolxml2xmlin/vol-linstor.xml
@@ -0,0 +1,10 @@
+<volume type='network'>
+ <name>test2</name>
+ <source>
+ </source>
+ <capacity unit='bytes'>1024</capacity>
+ <allocation unit='bytes'>0</allocation>
+ <target>
+ <path>/dev/drbd1000</path>
+ </target>
+</volume>
diff --git a/tools/virsh-pool.c b/tools/virsh-pool.c
index 7835fa6d75..0e5bec1688 100644
--- a/tools/virsh-pool.c
+++ b/tools/virsh-pool.c
@@ -1237,6 +1237,9 @@ cmdPoolList(vshControl *ctl, const vshCmd *cmd G_GNUC_UNUSED)
case VIR_STORAGE_POOL_VSTORAGE:
flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE;
break;
+ case VIR_STORAGE_POOL_LINSTOR:
+ flags |= VIR_CONNECT_LIST_STORAGE_POOLS_LINSTOR;
+ break;
case VIR_STORAGE_POOL_LAST:
break;
}
--
2.30.0
3 years, 11 months
[PATCH 0/3] Storage backend Linstor V2
by Rene Peinthor
Here is the updated PATCH with split commits,
and changes from the first review.
Rene Peinthor (3):
storage: Linstor configuration
storage: Linstor support
storage: Add tests for the Linstor storage backend
docs/schemas/storagepool.rng | 27 +
docs/storage.html.in | 39 +
include/libvirt/libvirt-storage.h | 1 +
meson.build | 6 +
meson_options.txt | 1 +
po/POTFILES.in | 1 +
src/conf/domain_conf.c | 1 +
src/conf/storage_conf.c | 14 +-
src/conf/storage_conf.h | 1 +
src/conf/virstorageobj.c | 4 +-
src/storage/meson.build | 25 +
src/storage/storage_backend.c | 6 +
src/storage/storage_backend_linstor.c | 781 ++++++++++++++++++
src/storage/storage_backend_linstor.h | 24 +
src/storage/storage_backend_linstor_priv.h | 53 ++
src/storage/storage_driver.c | 1 +
src/test/test_driver.c | 1 +
tests/linstorjsondata/broken.json | 1 +
tests/linstorjsondata/resource-group.json | 1 +
.../linstorjsondata/resource-list-test2.json | 332 ++++++++
.../storage-pools-ssdpool.json | 72 ++
tests/linstorjsondata/storage-pools.json | 192 +++++
tests/linstorjsondata/volume-def-list.json | 158 ++++
.../volume-definition-test2.json | 1 +
tests/meson.build | 6 +
tests/storagebackendlinstortest.c | 372 +++++++++
.../storagepoolcapsschemadata/poolcaps-fs.xml | 7 +
.../poolcaps-full.xml | 7 +
tests/storagepoolxml2argvtest.c | 1 +
tests/storagepoolxml2xmlin/pool-linstor.xml | 8 +
tests/storagevolxml2xmlin/vol-linstor.xml | 10 +
tools/virsh-pool.c | 3 +
32 files changed, 2155 insertions(+), 2 deletions(-)
create mode 100644 src/storage/storage_backend_linstor.c
create mode 100644 src/storage/storage_backend_linstor.h
create mode 100644 src/storage/storage_backend_linstor_priv.h
create mode 100644 tests/linstorjsondata/broken.json
create mode 100644 tests/linstorjsondata/resource-group.json
create mode 100644 tests/linstorjsondata/resource-list-test2.json
create mode 100644 tests/linstorjsondata/storage-pools-ssdpool.json
create mode 100644 tests/linstorjsondata/storage-pools.json
create mode 100644 tests/linstorjsondata/volume-def-list.json
create mode 100644 tests/linstorjsondata/volume-definition-test2.json
create mode 100644 tests/storagebackendlinstortest.c
create mode 100644 tests/storagepoolxml2xmlin/pool-linstor.xml
create mode 100644 tests/storagevolxml2xmlin/vol-linstor.xml
--
2.30.0
3 years, 11 months
[PATCH] vstorage: remove build time checks for runtime binaries
by Nikolay Shirokovskiy
According to the current agreement mentioned on the list recently [1],
the vstorage driver will now be built in the default development
environment and can also be included in CI. This also closes a quite old
abandoned thread on alternative checks for binaries in the case of this
same driver [2].
[1] https://www.redhat.com/archives/libvir-list/2021-January/msg00750.html
[2] https://www.redhat.com/archives/libvir-list/2020-July/msg00697.html
Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy(a)virtuozzo.com>
---
meson.build | 22 ++--------------------
src/storage/storage_backend_vstorage.c | 4 ++--
2 files changed, 4 insertions(+), 22 deletions(-)
diff --git a/meson.build b/meson.build
index b5277b4..e3e7ff7 100644
--- a/meson.build
+++ b/meson.build
@@ -1957,26 +1957,8 @@ if conf.has('WITH_LIBVIRTD')
endif
if not get_option('storage_vstorage').disabled()
- vstorage_enable = true
-
- foreach name : ['vstorage', 'vstorage-mount', 'umount']
- set_variable(
- '@0@_prog'.format(name.underscorify()),
- find_program(name, required: get_option('storage_vstorage'), dirs: libvirt_sbin_path)
- )
- if not get_variable('@0@_prog'.format(name.underscorify())).found()
- vstorage_enable = false
- endif
- endforeach
-
- if vstorage_enable
- use_storage = true
- conf.set('WITH_STORAGE_VSTORAGE', 1)
- foreach name : ['vstorage', 'vstorage-mount', 'umount']
- path = get_variable('@0@_prog'.format(name.underscorify())).path()
- conf.set_quoted(name.to_upper(), path)
- endforeach
- endif
+ use_storage = true
+ conf.set('WITH_STORAGE_VSTORAGE', 1)
endif
if not get_option('storage_zfs').disabled()
diff --git a/src/storage/storage_backend_vstorage.c b/src/storage/storage_backend_vstorage.c
index 6cff9f1..7c67407 100644
--- a/src/storage/storage_backend_vstorage.c
+++ b/src/storage/storage_backend_vstorage.c
@@ -65,7 +65,7 @@ virStorageBackendVzPoolStart(virStoragePoolObjPtr pool)
mode = g_strdup_printf("%o", def->target.perms.mode);
- cmd = virCommandNewArgList(VSTORAGE_MOUNT,
+ cmd = virCommandNewArgList("vstorage-mount",
"-c", def->source.name,
def->target.path,
"-m", mode,
@@ -129,7 +129,7 @@ virStorageBackendVzPoolStop(virStoragePoolObjPtr pool)
if ((rc = virStorageBackendVzIsMounted(pool)) != 1)
return rc;
- cmd = virCommandNewArgList(UMOUNT, def->target.path, NULL);
+ cmd = virCommandNewArgList("umount", def->target.path, NULL);
return virCommandRun(cmd, NULL);
}
--
1.8.3.1
3 years, 11 months
[PATCH] meson: fix vstorage driver build
by Nikolay Shirokovskiy
It breaks on using - in VSTORAGE-MOUNT definition with:
In file included from ../config.h:19:0,
from ../src/util/viraudit.c:22:
./meson-config.h:180:17: error: ISO C99 requires whitespace after the macro name [-Werror]
#define VSTORAGE-MOUNT "/usr/bin/vstorage-mount"
^
./meson-config.h:180:0: error: "VSTORAGE" redefined [-Werror]
#define VSTORAGE-MOUNT "/usr/bin/vstorage-mount"
^
./meson-config.h:178:0: note: this is the location of the previous definition
#define VSTORAGE "/usr/bin/vstorage"
^
#define VSTORAGE-MOUNT "/usr/bin/vstorage-mount"
---
meson.build | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/meson.build b/meson.build
index b5277b4..aff2565 100644
--- a/meson.build
+++ b/meson.build
@@ -1974,7 +1974,7 @@ if conf.has('WITH_LIBVIRTD')
conf.set('WITH_STORAGE_VSTORAGE', 1)
foreach name : ['vstorage', 'vstorage-mount', 'umount']
path = get_variable('@0@_prog'.format(name.underscorify())).path()
- conf.set_quoted(name.to_upper(), path)
+ conf.set_quoted(name.underscorify().to_upper(), path)
endforeach
endif
endif
--
1.8.3.1
3 years, 11 months
[PATCH 0/2] Move generation of NVDIMM UUID into post parse callback
by Michal Privoznik
I've noticed this while working on virtio-pmem-pci patches.
Michal Prívozník (2):
conf: Turn @uuid member of _virDomainMemoryDef struct into a pointer
conf: Move generation of NVDIMM UUID into post parse callback
src/conf/domain_conf.c | 47 +++++++++++++++++++++++++----------------
src/conf/domain_conf.h | 2 +-
src/qemu/qemu_command.c | 2 +-
3 files changed, 31 insertions(+), 20 deletions(-)
--
2.26.2
3 years, 11 months
[PATCH v2] spec: Add the man pages of split daemons
by Han Han
Fix the errors from commit a7cafa7bc2 when build RPMs from spec file:
error: Installed (but unpackaged) file(s) found:
/usr/share/man/man8/virtinterfaced.8.gz
/usr/share/man/man8/virtlxcd.8.gz
/usr/share/man/man8/virtnetworkd.8.gz
/usr/share/man/man8/virtnodedevd.8.gz
/usr/share/man/man8/virtnwfilterd.8.gz
/usr/share/man/man8/virtproxyd.8.gz
/usr/share/man/man8/virtqemud.8.gz
/usr/share/man/man8/virtsecretd.8.gz
/usr/share/man/man8/virtstoraged.8.gz
/usr/share/man/man8/virtvboxd.8.gz
/usr/share/man/man8/virtxend.8.gz
Signed-off-by: Han Han <hhan(a)redhat.com>
---
Diff from v1: remove man page for virtvzd and virtbhyved
---
libvirt.spec.in | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/libvirt.spec.in b/libvirt.spec.in
index b5892987cf..9458a7a02f 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -1581,6 +1581,16 @@ exit 0
%{_mandir}/man8/virtlogd.8*
%{_mandir}/man8/virtlockd.8*
%{_mandir}/man8/virtproxyd.8*
+%{_mandir}/man8/virtxend.8*
+%{_mandir}/man8/virtvboxd.8*
+%{_mandir}/man8/virtstoraged.8*
+%{_mandir}/man8/virtsecretd.8*
+%{_mandir}/man8/virtqemud.8*
+%{_mandir}/man8/virtnwfilterd.8*
+%{_mandir}/man8/virtnodedevd.8*
+%{_mandir}/man8/virtnetworkd.8*
+%{_mandir}/man8/virtlxcd.8*
+%{_mandir}/man8/virtinterfaced.8*
%{_mandir}/man7/virkey*.7*
%files daemon-config-network
--
2.29.2
3 years, 11 months
To start multiple KVM guests from one qcow2 image with transient disk option
by Masayoshi Mizuma
Hello,
I would like to start multiple KVM guests from one qcow2 image, and
discard the changes which the KVM guests have made.
The transient disk option is useful for discarding the changes; however,
we cannot start multiple KVM guests from one qcow2 image because the
image is write-locked by the first guest to be started.
I suppose the disk which transient option is enabled don't need to
get the write lock because any changes go to the overlay image, and
the overlay image is removed when the guest shutdown.
qemu has 'locking' option and the write lock is disabled when locking=off.
To implement that, I have two ideas. I would appreciate it if you could
give me the ideas which way is better (or another way).
1. Add an element to handle 'locking' qemu option. Like as:
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' locking='off'/>
<source file='/var/lib/libvirt/images/guest.qcow2'/>
<target dev='vda' bus='virtio'/>
<transient/>
</disk>
2. Add locking=off internally only if the transient disk option is enabled.
The sample code is as follows:
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 23415b323c..6fafe22ca3 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -10186,6 +10186,7 @@ virDomainDiskDefParseXML(virDomainXMLOptionPtr xmlopt,
def->src->shared = true;
} else if (virXMLNodeNameEqual(cur, "transient")) {
def->transient = true;
+ def->src->transient = true;
} else if (!encryption &&
virXMLNodeNameEqual(cur, "encryption")) {
if (!(encryption = virStorageEncryptionParseNode(cur, ctxt)))
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index 4640e339c0..3db888d08b 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -1211,6 +1211,12 @@ qemuBlockStorageSourceGetBackendProps(virStorageSourcePtr src,
"s:discard", "unmap",
NULL) < 0)
return NULL;
+
+ if (src->transient) {
+ if (virJSONValueObjectAdd(fileprops,
+ "S:locking", "off", NULL) < 0)
+ return NULL;
+ }
}
}
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
index 15494c3415..7823810df6 100644
--- a/src/qemu/qemu_snapshot.c
+++ b/src/qemu/qemu_snapshot.c
@@ -1182,7 +1182,8 @@ qemuSnapshotDiskPrepareDisksTransient(virDomainObjPtr vm,
snapdisk->src = virStorageSourceNew();
snapdisk->src->type = VIR_STORAGE_TYPE_FILE;
snapdisk->src->format = VIR_STORAGE_FILE_QCOW2;
- snapdisk->src->path = g_strdup_printf("%s.TRANSIENT", domdisk->src->path);
+ snapdisk->src->path = g_strdup_printf("%s.TRANSIENT-%s",
+ domdisk->src->path, vm->def->name);
if (virFileExists(snapdisk->src->path)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
diff --git a/src/util/virstoragefile.h b/src/util/virstoragefile.h
index 87763cf389..70c963bd42 100644
--- a/src/util/virstoragefile.h
+++ b/src/util/virstoragefile.h
@@ -384,6 +384,8 @@ struct _virStorageSource {
/* these must not be used apart from formatting the output JSON in the qemu driver */
char *ssh_user;
bool ssh_host_key_check_disabled;
+
+ bool transient;
};
G_DEFINE_AUTOPTR_CLEANUP_FUNC(virStorageSource, virObjectUnref);
--
Thanks,
Masa
3 years, 11 months
[PATCH 0/6] Introduce virtio-pmem <memory/> model
by Michal Privoznik
Technically, this is v3 of:
https://www.redhat.com/archives/libvir-list/2020-December/msg00199.html
But I've split the big series (so no longer mixing virtio-mem and
virtio-pmem). The virtio-pmem-pci is very similar to NVDIM so the
implementation is fairly straightforward.
Michal Prívozník (6):
qemu_capabilities: Introduce QEMU_CAPS_DEVICE_VIRTIO_PMEM_PCI
conf: Introduce virtio-pmem <memory/> model
security: Relabel virtio-pmem
qemu: Allow virtio-pmem in CGroups
qemu: Create virtio-pmem in namespace
qemu: Build command line for virtio-pmem
docs/formatdomain.rst | 29 ++++--
docs/schemas/domaincommon.rng | 1 +
src/conf/domain_conf.c | 41 ++++++++-
src/conf/domain_conf.h | 3 +-
src/conf/domain_validate.c | 40 ++++++++-
src/qemu/qemu_alias.c | 58 ++++++++----
src/qemu/qemu_capabilities.c | 2 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_cgroup.c | 6 +-
src/qemu/qemu_command.c | 73 ++++++++-------
src/qemu/qemu_domain.c | 25 ++++++
src/qemu/qemu_domain_address.c | 88 +++++++++++++++----
src/qemu/qemu_domain_address.h | 3 +-
src/qemu/qemu_hotplug.c | 2 +-
src/qemu/qemu_namespace.c | 3 +-
src/qemu/qemu_validate.c | 8 ++
src/security/security_apparmor.c | 1 +
src/security/security_dac.c | 2 +
src/security/security_selinux.c | 2 +
.../caps_4.1.0.x86_64.xml | 1 +
.../caps_4.2.0.x86_64.xml | 1 +
.../caps_5.0.0.x86_64.xml | 1 +
.../caps_5.1.0.x86_64.xml | 1 +
.../caps_5.2.0.x86_64.xml | 1 +
...ory-hotplug-virtio-pmem.x86_64-latest.args | 45 ++++++++++
.../memory-hotplug-virtio-pmem.xml | 53 +++++++++++
tests/qemuxml2argvtest.c | 1 +
...mory-hotplug-virtio-pmem.x86_64-latest.xml | 1 +
tests/qemuxml2xmltest.c | 1 +
29 files changed, 414 insertions(+), 80 deletions(-)
create mode 100644 tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.x86_64-latest.args
create mode 100644 tests/qemuxml2argvdata/memory-hotplug-virtio-pmem.xml
create mode 120000 tests/qemuxml2xmloutdata/memory-hotplug-virtio-pmem.x86_64-latest.xml
--
2.26.2
3 years, 11 months
[PATCH] spec: Add the man pages of split daemons
by Han Han
Fix the errors from commit a7cafa7bc2 when build RPMs from spec file:
error: Installed (but unpackaged) file(s) found:
/usr/share/man/man8/virtinterfaced.8.gz
/usr/share/man/man8/virtlxcd.8.gz
/usr/share/man/man8/virtnetworkd.8.gz
/usr/share/man/man8/virtnodedevd.8.gz
/usr/share/man/man8/virtnwfilterd.8.gz
/usr/share/man/man8/virtproxyd.8.gz
/usr/share/man/man8/virtqemud.8.gz
/usr/share/man/man8/virtsecretd.8.gz
/usr/share/man/man8/virtstoraged.8.gz
/usr/share/man/man8/virtvboxd.8.gz
/usr/share/man/man8/virtxend.8.gz
Signed-off-by: Han Han <hhan(a)redhat.com>
---
libvirt.spec.in | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/libvirt.spec.in b/libvirt.spec.in
index b5892987cf..4389bef406 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -1581,6 +1581,18 @@ exit 0
%{_mandir}/man8/virtlogd.8*
%{_mandir}/man8/virtlockd.8*
%{_mandir}/man8/virtproxyd.8*
+%{_mandir}/man8/virtxend.8*
+%{_mandir}/man8/virtvzd.8*
+%{_mandir}/man8/virtvboxd.8*
+%{_mandir}/man8/virtstoraged.8*
+%{_mandir}/man8/virtsecretd.8*
+%{_mandir}/man8/virtqemud.8*
+%{_mandir}/man8/virtnwfilterd.8*
+%{_mandir}/man8/virtnodedevd.8*
+%{_mandir}/man8/virtnetworkd.8*
+%{_mandir}/man8/virtlxcd.8*
+%{_mandir}/man8/virtinterfaced.8*
+%{_mandir}/man8/virtbhyved.8*
%{_mandir}/man7/virkey*.7*
%files daemon-config-network
--
2.29.2
3 years, 11 months