[libvirt] [PATCH] storage: implement rudimentary glusterfs pool refresh

Actually put gfapi to use, by allowing the creation of a gluster
pool.  Right now, all volumes are treated as raw; further patches
will allow peering into files to allow for qcow2 files and backing
chains, and reporting proper volume allocation.

I've reported a couple of glusterfs bugs; if we were to require a
minimum of (not-yet-released) glusterfs 3.5, we could use the new
glfs_readdir [1] and not worry about the bogus return value of
glfs_fini [2], but for now I'm testing with Fedora 19's glusterfs
3.4.1.

[1] http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00085.html
[2] http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00086.html

* src/storage/storage_backend_gluster.c
(virStorageBackendGlusterRefreshPool): Initial implementation.
(virStorageBackendGlusterOpen, virStorageBackendGlusterClose): New
helper functions.

Signed-off-by: Eric Blake <eblake@redhat.com>
---
Depends on these pre-req patches:
https://www.redhat.com/archives/libvir-list/2013-October/msg01266.html
https://www.redhat.com/archives/libvir-list/2013-October/msg00913.html

My next task - figuring out the use of glfs_open() to read metadata
from a file and determine backing chains.

 src/storage/storage_backend_gluster.c | 138 ++++++++++++++++++++++++++++++++--
 1 file changed, 133 insertions(+), 5 deletions(-)

diff --git a/src/storage/storage_backend_gluster.c b/src/storage/storage_backend_gluster.c
index 2863c73..b0b6ce6 100644
--- a/src/storage/storage_backend_gluster.c
+++ b/src/storage/storage_backend_gluster.c
@@ -23,20 +23,148 @@
 #include <glusterfs/api/glfs.h>

-#include "virerror.h"
 #include "storage_backend_gluster.h"
 #include "storage_conf.h"
+#include "viralloc.h"
+#include "virerror.h"
+#include "virlog.h"
+#include "virstoragefile.h"
+#include "virstring.h"

 #define VIR_FROM_THIS VIR_FROM_STORAGE

+struct _virStorageBackendGlusterState {
+    glfs_t *vol;
+};
+
+typedef struct _virStorageBackendGlusterState virStorageBackendGlusterState;
+typedef virStorageBackendGlusterState *virStorageBackendGlusterStatePtr;
+
+static void
+virStorageBackendGlusterClose(virStorageBackendGlusterStatePtr state)
+{
+    if (!state || !state->vol)
+        return;
+    /* Yuck - glusterfs-api-3.4.1 appears to always return -1 for
+     * glfs_fini, with errno containing random data, so there's no way
+     * to tell if it succeeded.  3.4.2 is supposed to fix this. */
+    if (glfs_fini(state->vol) < 0)
+        VIR_DEBUG("shutdown of gluster failed with errno %d", errno);
+}
+
+static virStorageBackendGlusterStatePtr
+virStorageBackendGlusterOpen(virStoragePoolObjPtr pool)
+{
+    virStorageBackendGlusterStatePtr ret = NULL;
+
+    if (VIR_ALLOC(ret) < 0)
+        return NULL;
+
+    if (!(ret->vol = glfs_new(pool->def->source.name))) {
+        virReportOOMError();
+        goto error;
+    }
+
+    /* FIXME: allow alternate transport in the pool xml */
+    if (glfs_set_volfile_server(ret->vol, "tcp",
+                                pool->def->source.hosts[0].name,
+                                pool->def->source.hosts[0].port) < 0 ||
+        glfs_init(ret->vol) < 0) {
+        virReportSystemError(errno, _("failed to connect to gluster %s/%s"),
+                             pool->def->source.hosts[0].name,
+                             pool->def->name);
+        goto error;
+    }
+
+    return ret;
+
+error:
+    virStorageBackendGlusterClose(ret);
+    return NULL;
+}

 static int
 virStorageBackendGlusterRefreshPool(virConnectPtr conn ATTRIBUTE_UNUSED,
-                                    virStoragePoolObjPtr pool ATTRIBUTE_UNUSED)
+                                    virStoragePoolObjPtr pool)
 {
-    virReportError(VIR_ERR_NO_SUPPORT, "%s",
-                   _("gluster pool type not fully supported yet"));
-    return -1;
+    int ret = -1;
+    virStorageBackendGlusterStatePtr state = NULL;
+    struct {
+        struct dirent ent;
+        /* See comment below about readdir_r needing padding */
+        char padding[MAX(1, 256 - (int) (sizeof(struct dirent)
+                                         - offsetof(struct dirent, d_name)))];
+    } de;
+    struct dirent *ent;
+    glfs_fd_t *dir = NULL;
+    virStorageVolDefPtr vol = NULL;
+    struct statvfs sb;
+
+    if (!(state = virStorageBackendGlusterOpen(pool)))
+        goto cleanup;
+
+    /* Why oh why did glfs 3.4 decide to expose only readdir_r rather
+     * than readdir?  POSIX admits that readdir_r is inherently a
+     * flawed design, because systems are not required to define
+     * NAME_MAX: http://austingroupbugs.net/view.php?id=696
+     * http://womble.decadent.org.uk/readdir_r-advisory.html
+     *
+     * Fortunately, gluster uses _only_ XFS file systems, and XFS has
+     * a known NAME_MAX of 255; so we are guaranteed that if we
+     * provide 256 bytes of tail padding, then we have enough space to
+     * avoid buffer overflow no matter whether the OS used d_name[],
+     * d_name[1], or d_name[256] in its 'struct dirent'.
+     * http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00083.html
+     */
+
+    if (!(dir = glfs_opendir(state->vol, "."))) {
+        virReportSystemError(errno, _("cannot open path '%s'"),
+                             pool->def->name);
+        goto cleanup;
+    }
+    while (!(errno = glfs_readdir_r(dir, &de.ent, &ent)) && ent) {
+        if (STREQ(ent->d_name, ".") || STREQ(ent->d_name, ".."))
+            continue;
+        if (VIR_ALLOC(vol) < 0 ||
+            VIR_STRDUP(vol->name, ent->d_name) < 0)
+            goto cleanup;
+        /* FIXME - must open files to determine if they are non-raw */
+        vol->type = VIR_STORAGE_VOL_NETWORK;
+        vol->target.format = VIR_STORAGE_FILE_RAW;
+        if (virAsprintf(&vol->key, "%s/%s",
+                        pool->def->name, vol->name) < 0)
+            goto cleanup;
+        if (VIR_APPEND_ELEMENT(pool->volumes.objs, pool->volumes.count,
+                               vol) < 0)
+            goto cleanup;
+    }
+    if (errno) {
+        virReportSystemError(errno, _("failed to read directory '%s'"),
+                             pool->def->name);
+        goto cleanup;
+    }
+
+    if (glfs_statvfs(state->vol, ".", &sb) < 0) {
+        virReportSystemError(errno, _("cannot statvfs path '%s'"),
+                             pool->def->name);
+        goto cleanup;
+    }
+
+    pool->def->capacity = ((unsigned long long)sb.f_frsize *
+                           (unsigned long long)sb.f_blocks);
+    pool->def->available = ((unsigned long long)sb.f_bfree *
+                            (unsigned long long)sb.f_frsize);
+    pool->def->allocation = pool->def->capacity - pool->def->available;
+
+    ret = 0;
+cleanup:
+    if (dir)
+        glfs_closedir(dir);
+    virStorageVolDefFree(vol);
+    virStorageBackendGlusterClose(state);
+    if (ret < 0)
+        virStoragePoolObjClearVols(pool);
+    return ret;
 }

 virStorageBackend virStorageBackendGluster = {
--
1.8.3.1
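
For readers new to gfapi, the following standalone sketch shows the same call sequence the refresh implementation relies on (glfs_new, glfs_set_volfile_server, glfs_init, glfs_opendir, glfs_readdir_r, glfs_statvfs, glfs_fini). It is not part of the patch; the volume name and host are placeholders, and it would be built with something like 'cc demo.c -lgfapi'.

/* Minimal gfapi sketch, independent of libvirt: list a gluster volume and
 * report its capacity.  "demo-vol" and "gluster.example.com" are placeholders. */
#include <glusterfs/api/glfs.h>
#include <sys/statvfs.h>
#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    glfs_t *fs;
    glfs_fd_t *dir;
    struct {
        struct dirent ent;
        char padding[256];   /* same tail-padding trick as the patch uses */
    } de;
    struct dirent *ent;
    struct statvfs sb;

    if (!(fs = glfs_new("demo-vol")))                    /* volume name */
        return 1;
    if (glfs_set_volfile_server(fs, "tcp", "gluster.example.com", 0) < 0 ||
        glfs_init(fs) < 0) {                             /* connect to the volume */
        fprintf(stderr, "connect failed: errno %d\n", errno);
        return 1;
    }

    if ((dir = glfs_opendir(fs, "."))) {
        while (!(errno = glfs_readdir_r(dir, &de.ent, &ent)) && ent) {
            if (strcmp(ent->d_name, ".") && strcmp(ent->d_name, ".."))
                printf("volume entry: %s\n", ent->d_name);
        }
        glfs_closedir(dir);
    }

    if (glfs_statvfs(fs, ".", &sb) == 0)                 /* pool capacity data */
        printf("capacity: %llu bytes\n",
               (unsigned long long)sb.f_frsize * sb.f_blocks);

    glfs_fini(fs);   /* return value is unreliable in 3.4.1, as noted above */
    return 0;
}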

On Wed, Oct 30, 2013 at 05:30:27PM -0600, Eric Blake wrote:
Actually put gfapi to use, by allowing the creation of a gluster pool. Right now, all volumes are treated as raw; further patches will allow peering into files to allow for qcow2 files and backing chains, and reporting proper volume allocation.
I've reported a couple of glusterfs bugs; if we were to require a minimum of (not-yet-released) glusterfs 3.5, we could use the new glfs_readdir [1] and not worry about the bogus return value of glfs_fini [2], but for now I'm testing with Fedora 19's glusterfs 3.4.1.
[1] http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00085.html [2] http://lists.gnu.org/archive/html/gluster-devel/2013-10/msg00086.html
* src/storage/storage_backend_gluster.c (virStorageBackendGlusterRefreshPool): Initial implementation. (virStorageBackendGlusterOpen, virStorageBackendGlusterClose): New helper functions.
Signed-off-by: Eric Blake <eblake@redhat.com> ---
Depends on these pre-req patches: https://www.redhat.com/archives/libvir-list/2013-October/msg01266.html https://www.redhat.com/archives/libvir-list/2013-October/msg00913.html
My next task - figuring out the use of glfs_open() to read metadata from a file and determine backing chains.
NB, we don't want the src/util code to gain a dependency on glusterfs RPMs. It is a very heavyweight package chain which cloud folks don't want us to pull in by default, hence my recent changes to RPM deps.

Daniel

On 10/31/2013 03:23 AM, Daniel P. Berrange wrote:
My next task - figuring out the use of glfs_open() to read metadata from a file and determine backing chains.
NB, we don't want the src/util code to gain a dependency on glusterfs RPMs. It is a very heavyweight package chain which cloud folks don't want us to pull in by default, hence my recent changes to RPM deps.
Indeed; which is why I'm thinking that the src/util code has to call into the storage driver to ask if any registered backends can handle a network file name. Because we are modular, the ONLY use of glfs.h will be in the backend; if the correct rpm is installed to provide the gluster backend, then the network file can be decoded; if the rpm is not installed, then src/util has no choice but to treat the file as raw.

I don't know if that means the libvirt-daemon-driver-storage needs to be further split into multiple .so files across multiple sub-rpms, so that a user can pick and choose which .so backends to install rather than dragging in all dependencies for all backends.

Eric Blake wrote:
On 10/31/2013 03:23 AM, Daniel P. Berrange wrote:
My next task - figuring out the use of glfs_open() to read metadata from a file and determine backing chains.
NB, we don't want the src/util code to gain a dependency on glusterfs RPMs. It is a very heavyweight package chain which cloud folks don't want us to pull in by default, hence my recent changes to RPM deps.
Indeed; which is why I'm thinking that the src/util code has to call into the storage driver to ask if any registered backends can handle a network file name. Because we are modular, the ONLY use of glfs.h will be in the backend; if the correct rpm is installed to provide the gluster backend, then the network file can be decoded; if the rpm is not installed, then src/util has no choice but to treat the file as raw.
I don't know if that means the libvirt-daemon-driver-storage needs to be further split into multiple .so files across multiple sub-rpms, so that a user can pick and choose which .so backends to install rather than dragging in all dependencies for all backends.
Just earlier this week I considered this approach while investigating an internal issue. In an older base product, we don't support rbd (and hence the rbd backend), but an add-on product does support rbd. We discussed a "libvirt-daemon-driver-storage-backend-rbd" subpackage that could be included and used by the add-on product without affecting the base product.

Regards,
Jim

On Thu, Oct 31, 2013 at 06:35:56AM -0600, Eric Blake wrote:
On 10/31/2013 03:23 AM, Daniel P. Berrange wrote:
My next task - figuring out the use of glfs_open() to read metadata from a file and determine backing chains.
NB, we don't want the src/util code to gain a dependency on glusterfs RPMs. It is a very heavyweight package chain which cloud folks don't want us to pull in by default, hence my recent changes to RPM deps.
Indeed; which is why I'm thinking that the src/util code has to call into the storage driver to ask if any registered backends can handle a network file name. Because we are modular, the ONLY use of glfs.h will be in the backend; if the correct rpm is installed to provide the gluster backend, then the network file can be decoded; if the rpm is not installed, then src/util has no choice but to treat the file as raw.
I don't know if that means the libvirt-daemon-driver-storage needs to be further split into multiple .so files across multiple sub-rpms, so that a user can pick and choose which .so backends to install rather than dragging in all dependencies for all backends.
The QEMU driver will need to use librbd to query backing file in any qcow2 files stored in the rbd volume. Likewise for gluster. So while making the storage driver modular would allow you to install a subset of the storage driver deps, you're still not going to avoid pulling in the gluster/rbd libraries, since the QEMU driver will pull them in anyway.

Daniel

On 10/31/2013 11:30 AM, Daniel P. Berrange wrote:
I don't know if that means the libvirt-daemon-driver-storage needs to be further split into multiple .so files across multiple sub-rpms, so that a user can pick and choose which .so backends to install rather than dragging in all dependencies for all backends.
The QEMU driver will need to use librbd to query backing file in any qcow2 files stored in the rbd volume. Likewise for gluster. So while making the storage driver modular would allow you to install a subset of the storage driver deps, you're still not going to avoid pulling in the gluster/rbd libraries, since the QEMU driver will pull them in anyway.
Not quite - making the storage driver modular would merely mean that libvirt would not support anything except raw rbd volumes if the rbd storage backend was not available. Yes, for backing file chains, qemu would need the full dependencies; but if you know you aren't going to use qemu with rbd, then having the storage modules would allow you to avoid dragging in rbd libraries.

On Thu, Oct 31, 2013 at 11:43:11AM -0600, Eric Blake wrote:
On 10/31/2013 11:30 AM, Daniel P. Berrange wrote:
I don't know if that means the libvirt-daemon-driver-storage needs to be further split into multiple .so files across multiple sub-rpms, so that a user can pick and choose which .so backends to install rather than dragging in all dependencies for all backends.
The QEMU driver will need to use librbd to query backing file in any qcow2 files stored in the rbd volume. Likewise for gluster. So while making the storage driver modular would allow you to install a subset of the storage driver deps, you're still not going to avoid pulling in the gluster/rbd libraries, since the QEMU driver will pull them in anyway.
Not quite - making the storage driver modular would merely mean that libvirt would not support anything except raw rbd volumes if the rbd storage backend was not available. Yes, for backing file chains, qemu would need the full dependencies; but if you know you aren't going to use qemu with rbd, then having the storage modules would allow you to avoid dragging in rbd libraries.
The QEMU driver should be near 100% functional without needing the storage driver at all. The only dep should be if you use <disk type=vol>. A <disk type=network> should have no dependency on the storage driver, so we'd need rbd there even if the storage driver was not there.

Daniel

On 10/31/2013 11:49 AM, Daniel P. Berrange wrote:
The QEMU driver should be near 100% functional without needing the storage driver at all. The only dep should be if you use <disk type=vol>. A <disk type=network> should have no dependency on the storage driver, so we'd need rbd there even if the storage driver was not there.
<disk type='network'><driver type='raw'/></disk> should be just fine without rbd driver pulled in by libvirt. But you are right that qemu will have pulled it in to be able to use the image. So as long as qemu is monolithic, even a raw-only use of a backing file doesn't free your system from needing the libraries.

<disk type='network'><driver type='qcow2'/></disk> currently boots, but that is a mistake under sVirt because we can't chase the backing chain to guarantee that any backing files are present with correct permissions unless we have the rbd/gluster/... storage backend present. I'm arguing that <disk type='network'><driver type='qcow2'/> should be made an error unless the storage backend also provides the tools for chasing that backing chain, and that the storage backend be modular so that you only require the libraries for the particular network storage that you plan on using; while you're arguing one step further that <disk type='network'> is itself useless if qemu doesn't also support that network protocol.

Where it gets interesting is that qemu itself is trying to go modular, so that you can decide via .so files whether rbd and gluster libraries are required for running qemu - once qemu is modular, then you can have a case where the same qemu binary either supports or fails to support rbd based on whether the rbd .so was installed; if that's the case, then libvirt should also have the ability to support or fail to support rbd based on whether the storage rbd is installed.

On Thu, Oct 31, 2013 at 11:58:48AM -0600, Eric Blake wrote:
On 10/31/2013 11:49 AM, Daniel P. Berrange wrote:
The QEMU driver should be near 100% functional without needing the storage driver at all. The only dep should be if you use <disk type=vol>. A <disk type=network> should have no dependency on the storage driver, so we'd need rbd there even if the storage driver was not there.
<disk type='network'><driver type='raw'/></disk> should be just fine without rbd driver pulled in by libvirt. But you are right that qemu will have pulled it in to be able to use the image. So as long as qemu is monolithic, even a raw-only use of a backing file doesn't free your system from needing the libraries.
<disk type='network'><driver type='qcow2'/></disk> currently boots, but that is a mistake under sVirt because we can't chase the backing chain to guarantee that any backing files are present with correct permissions unless we have the rbd/gluster/... storage backend present. I'm arguing that <disk type='network'><driver type='qcow2'/> should be made an error unless the storage backend also provides the tools for chasing that backing chain, and that the storage backend be modular so that you only require the libraries for the particular network storage that you plan on using; while you're arguing one step further that <disk type='network'> is itself useless if qemu doesn't also support that network protocol.
If <disk type='network'/> were made to require a storage pool in order to use it, that would be a regression in functionality IMHO. This setup should be completely independent of the storage driver code. It needs to be able to resolve the backing file chain directly, without calling into the storage driver.

Daniel

On 10/31/2013 12:09 PM, Daniel P. Berrange wrote:
If <disk type='network'/> were made to require a storage pool in order to use it, that would be a regression in functionality IMHO. This setup should be completely independent of the storage driver code. It needs to be able to resolve the backing file chain directly, without calling into the storage driver.
I'm not requiring that a storage pool be present, only that the storage backend be present, by limiting the compilation against the network libraries to just the storage backends. Booting something that doesn't belong to a pool has always been supported (ideally, what we SHOULD do is make anything that a <domain> refers to but which does not map to an existing pool create a transient pool for the lifetime of the domain, so that all domain <disk> sources are backed by a pool - but that's a bigger project).

And I don't see a regression - allowing network/raw without a storage backend will still work; where we need to change is that we currently allow network/qcow2 to boot, but this is cheating, and breaks if sVirt doesn't label the backing file. That is, we are treating network/qcow2 as if it were network/raw always; and if the storage backend is not present, we'd _still_ have to treat it as network/raw (rather than refuse to boot, as that would be a regression; but if there is any backing chain, boot may still fail anyway because of sVirt).

But where we DO have a storage backend, then pool or no pool, we are able to chase the backing chain correctly, at the expense of dragging in the additional libraries. And since qemu is trying to go modular to avoid libraries, we should also make our storage backends modular to avoid libraries (where we then are forced to fall back to network/raw as the only sane handling of network sources).

On Thu, Oct 31, 2013 at 12:17:54PM -0600, Eric Blake wrote:
On 10/31/2013 12:09 PM, Daniel P. Berrange wrote:
If <disk type='network'/> were made to require a storage pool in order to use it, that would be a regression in functionality IMHO. This setup should be completely independent of the storage driver code. It needs to be able to resolve the backing file chain directly, without calling into the storage driver.
I'm not requiring that a storage pool be present, only that the storage backend be present, by limiting the compilation against the network libraries to just the storage backends. Booting something that doesn't belong to a pool has always been supported (ideally, what we SHOULD do is make anything that a <domain> refers to but which does not map to an existing pool create a transient pool for the lifetime of the domain, so that all domain <disk> sources are backed by a pool - but that's a bigger project).

And I don't see a regression - allowing network/raw without a storage backend will still work; where we need to change is that we currently allow network/qcow2 to boot, but this is cheating, and breaks if sVirt doesn't label the backing file. That is, we are treating network/qcow2 as if it were network/raw always; and if the storage backend is not present, we'd _still_ have to treat it as network/raw (rather than refuse to boot, as that would be a regression; but if there is any backing chain, boot may still fail anyway because of sVirt).

But where we DO have a storage backend, then pool or no pool, we are able to chase the backing chain correctly, at the expense of dragging in the additional libraries. And since qemu is trying to go modular to avoid libraries, we should also make our storage backends modular to avoid libraries (where we then are forced to fall back to network/raw as the only sane handling of network sources).
IIUC, this is still going to be introducing a direct code path dependency between the QEMU and storage driver code though.

Currently src/qemu depends only on code in src/util & src/conf[1]. For the storage pool bits, it can call indirectly via src/libvirt.c APIs, but it does not call anything in src/storage/ directly. I really want this to remain that way, because avoiding direct calls is key to enabling us to split libvirtd up into separate daemons per driver in the future.

Daniel

[1] Yes, there's a dep on src/network too, which I was never at all happy about, and which needs to be refactored / removed in some manner in the future

On 10/31/2013 12:34 PM, Daniel P. Berrange wrote:
IIUC, this is still going to be introducing a direct code path dependency between the QEMU and storage driver code though.
No, src/qemu would still get backing chain details via src/util, the way it has always done. Only src/util has to have the magic to call into src/storage backends (if the right backend is present, chase the backing chain of the network disk; if absent, forcefully assume raw as is already currently done).
Currently src/qemu depends only on code in src/util & src/conf[1]. For the storage pool bits, it can call indirectly via src/libvirt.c APIs, but it does not call anything in src/storage/ directly. I really want this to remain that way, because avoiding direct calls is key to enabling us to split libvirtd up into separate daemons per driver in the future.
Daniel
[1] Yes, there's a dep on src/network too, which I was never at all happy about, and which needs to be refactored / removed in some manner in the future
This is a useful discussion, because I don't want to be developing code that violates layering constraints. So, am I on the right track by allowing src/util to call into src/storage backends, or is the only proper way to do that to have src/util call in to src/libvirt.c APIs which then call into src/storage indirectly?

How does this compare to src/qemu/qemu_process.c, using conn->secretDriver->secretGetValue(, VIR_SECRET_GET_VALUE_INTERNAL_CALL), as a means of calling into src/secret indirectly but without going through src/libvirt.c API?

On Thu, Oct 31, 2013 at 12:46:07PM -0600, Eric Blake wrote:
On 10/31/2013 12:34 PM, Daniel P. Berrange wrote:
IIUC, this is still going to be introducing a direct code path dependency between the QEMU and storage driver code though.
No, src/qemu would still get backing chain details via src/util, the way it has always done. Only src/util has to have the magic to call into src/storage backends (if the right backend is present, chase the backing chain of the network disk; if absent, forcefully assume raw as is already currently done).
Currently src/qemu depends only on code in src/util & src/conf[1]. For the storage pool bits, it can call indirectly via src/libvirt.c APIs, but it does not call anything in src/storage/ directly. I really want this to remain that way, because avoiding direct calls is key to enabling us to split libvirtd up into separate daemons per driver in the future.
Daniel
[1] Yes, there's a dep on src/network too, which I was never at all happy about, and which needs to be refactored / removed in some manner in the future
This is a useful discussion, because I don't want to be developing code that violates layering constraints. So, am I on the right track by allowing src/util to call into src/storage backends, or is the only proper way to do that to have src/util call in to src/libvirt.c APIs which then call into src/storage indirectly?
src/util shouldn't have any compile-time dependency on src/storage, or indeed any other directory. You could conceivably do it indirectly, by having src/storage register some kind of plugin with src/util at startup. That way it is only a runtime dep.
How does this compare to src/qemu/qemu_process.c, using conn->secretDriver->secretGetValue(, VIR_SECRET_GET_VALUE_INTERNAL_CALL), as a means of calling into src/secret indirectly but without going through src/libvirt.c API?
Again, that's a runtime-only dep. So if the libvirt_driver_secret.la was not installed, it would not be fatal - the API calls would just get a runtime error.

Daniel
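
To make the registration idea concrete, here is a rough sketch of what such a runtime hook could look like. The names (virFileRegisterBackingProbe and friends) are hypothetical, not existing libvirt APIs; the point is only that src/util stays free of any compile-time dependency on gfapi or librbd while the loaded backend modules supply the protocol-specific probing.

/* Hypothetical sketch of a runtime-registered probe table in src/util.
 * Each storage backend registers its handler when its module loads. */
#include <stddef.h>
#include <string.h>

typedef int (*virFileBackingChainProbe)(const char *path, char **backing);

static struct {
    const char *protocol;               /* e.g. "gluster", "rbd" */
    virFileBackingChainProbe probe;
} probes[8];
static size_t nprobes;

/* Called by a backend (e.g. storage_backend_gluster.c) at module load. */
int
virFileRegisterBackingProbe(const char *protocol, virFileBackingChainProbe cb)
{
    if (nprobes >= sizeof(probes) / sizeof(probes[0]))
        return -1;
    probes[nprobes].protocol = protocol;
    probes[nprobes].probe = cb;
    nprobes++;
    return 0;
}

/* Called from src/util when chasing a backing chain: if no backend has
 * registered for the protocol, fall back to treating the file as raw. */
int
virFileProbeBackingStore(const char *protocol, const char *path, char **backing)
{
    size_t i;

    for (i = 0; i < nprobes; i++) {
        if (strcmp(probes[i].protocol, protocol) == 0)
            return probes[i].probe(path, backing);
    }
    *backing = NULL;        /* no plugin loaded: assume raw, no backing file */
    return 0;
}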

On 10/30/2013 05:30 PM, Eric Blake wrote:
Actually put gfapi to use, by allowing the creation of a gluster pool. Right now, all volumes are treated as raw; further patches will allow peering into files to allow for qcow2 files and backing chains, and reporting proper volume allocation.
Needs a v2 for several reasons:
+
+static void
+virStorageBackendGlusterClose(virStorageBackendGlusterStatePtr state)
+{
+    if (!state || !state->vol)
+        return;
+    /* Yuck - glusterfs-api-3.4.1 appears to always return -1 for
+     * glfs_fini, with errno containing random data, so there's no way
+     * to tell if it succeeded.  3.4.2 is supposed to fix this. */
+    if (glfs_fini(state->vol) < 0)
+        VIR_DEBUG("shutdown of gluster failed with errno %d", errno);
+}
Leaks state.
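
One possible shape of the fix, as a sketch (the eventual v2 may differ): free the state struct itself on every path, and only call glfs_fini() when a volume was actually allocated.

static void
virStorageBackendGlusterClose(virStorageBackendGlusterStatePtr state)
{
    if (!state)
        return;
    /* glfs_fini()'s return value is unreliable in glusterfs-api-3.4.1,
     * so merely log on apparent failure. */
    if (state->vol && glfs_fini(state->vol) < 0)
        VIR_DEBUG("shutdown of gluster failed with errno %d", errno);
    VIR_FREE(state);   /* this is what the v1 patch forgot */
}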
+
+static virStorageBackendGlusterStatePtr
+virStorageBackendGlusterOpen(virStoragePoolObjPtr pool)
+{
+    virStorageBackendGlusterStatePtr ret = NULL;
+
+    if (VIR_ALLOC(ret) < 0)
+        return NULL;
+
+    if (!(ret->vol = glfs_new(pool->def->source.name))) {
Fails for subdirectory access (qemu allows gluster://host/vol/dir/file, so we should allow vol/dir as a pool name in the XML, but glfs requires us to open vol then chdir to dir, rather than opening vol/dir in one go).
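
A sketch of one way to handle that, assuming the source name may carry a subdirectory such as "vol/dir" (the helper and its name are illustrative, not the eventual v2 code): mount only the volume portion, then glfs_chdir() into the remainder after glfs_init().

#include <glusterfs/api/glfs.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative helper: connect to "vol" or "vol/dir" and end up positioned
 * in the requested directory.  Error reporting omitted for brevity. */
static glfs_t *
glusterOpenSource(const char *name, const char *host, int port)
{
    glfs_t *fs = NULL;
    char *dup = strdup(name);   /* real libvirt code would use VIR_STRDUP */
    char *dir;

    if (!dup)
        return NULL;
    dir = strchr(dup, '/');     /* split "vol/dir" at the first slash */
    if (dir)
        *dir++ = '\0';

    if (!(fs = glfs_new(dup)))
        goto error;
    if (glfs_set_volfile_server(fs, "tcp", host, port) < 0 ||
        glfs_init(fs) < 0)
        goto error;
    if (dir && *dir && glfs_chdir(fs, dir) < 0)   /* enter the subdirectory */
        goto error;

    free(dup);
    return fs;

error:
    free(dup);
    if (fs)
        glfs_fini(fs);
    return NULL;
}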
+        virReportOOMError();
+        goto error;
+    }
+
+    /* FIXME: allow alternate transport in the pool xml */
+    if (glfs_set_volfile_server(ret->vol, "tcp",
+                                pool->def->source.hosts[0].name,
+                                pool->def->source.hosts[0].port) < 0 ||
+        glfs_init(ret->vol) < 0) {
+        virReportSystemError(errno, _("failed to connect to gluster %s/%s"),
+                             pool->def->source.hosts[0].name,
+                             pool->def->name);
Reports the wrong name (should be pool->def->source.name).

And I'm still working on the follow-up patches for qcow2 metadata parsing...