[libvirt] [PATCH 1/1] qemu: add support for multiple gluster hosts

currently libvirt has the capability to parse only one host and convert that into URI formatted string, with the help of this patch libvirt will be able to parse multiple hosts from the domain xml and can convert that into JSON formatted string before: ------ example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24007' transport="tcp"/> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ... resultant string: file=gluster://1.2.3.4:24007/testvol/a.qcow2, \ if=none,id=drive-virtio-disk0,format=qcow2,cache=none after: ----- example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24009' transport="tcp"/> <host name='3.4.5.6' port="24008"/> <host name='5.6.7.8' /> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ... 
resultant string: -drive file=json:{ "file": { "driver": "gluster",, "volname": "testvol",, "image-path": "/a.qcow2",, "volfile-servers": [ { "server": "1.2.3.4",, "port": 24009,, "transport": "tcp" },, { "server": "3.4.5.6",, "port": 24008,, "transport": "tcp" },, { "server": "5.6.7.8",, "port": 24007,, "transport": "tcp" } ] },, "driver": "qcow2" } ,if=none,id=drive-virtio-disk0,cache=none if the number of hosts supplied is only one, then we use existing URI format for backward compatibility and if number of hosts is greater than one we switch to the new JSON format this patch requires qemu latest patch https://lists.gnu.org/archive/html/qemu-devel/2015-09/msg07062.html Credits: Sincere thanks to Kevin Wolf <kwolf@redhat.com> and "Deepak C Shetty" <deepakcs@redhat.com> for their support Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> --- src/qemu/qemu_command.c | 192 ++++++++++++++++++++++++++++++++++++------------ 1 file changed, 145 insertions(+), 47 deletions(-) diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c index bb1835c..8c1bf1a 100644 --- a/src/qemu/qemu_command.c +++ b/src/qemu/qemu_command.c @@ -3256,16 +3256,106 @@ qemuNetworkDriveGetPort(int protocol, return -1; } +#define QEMU_DEFAULT_GLUSTER_PORT 24007 + +static virJSONValuePtr +qemuBuildGlusterDriveJSON(virStorageSourcePtr src) +{ + virJSONValuePtr parent = NULL; + virJSONValuePtr object = NULL; + virJSONValuePtr dict_array = NULL; + int port; + int i; + + if (!(parent = virJSONValueNewObject())) + return NULL; + /* 1. prepare { driver:"gluster", volname:"testvol", image-path:"/a.img"} */ + if (virJSONValueObjectAdd(parent, + "s:driver", virStorageNetProtocolTypeToString(src->protocol), + "s:volname", src->volume, + "s:image-path", src->path, + NULL) < 0) + goto cleanup; + + if (!(dict_array = virJSONValueNewArray())) + goto cleanup; + /* 2. 
prepare array [ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] */ + for (i = 0; i < src->nhosts; i++) { + if (!(object = virJSONValueNewObject())) + goto cleanup; + port = qemuNetworkDriveGetPort(src->protocol, src->hosts[i].port); + if (virJSONValueObjectAdd(object, "s:server", src->hosts[i].name, + "i:port", port ? port : QEMU_DEFAULT_GLUSTER_PORT , + "s:transport", + virStorageNetHostTransportTypeToString(src->hosts[i].transport), + NULL) < 0) + goto cleanup; + if (virJSONValueArrayAppend(dict_array, object) < 0) { + virJSONValueFree(object); + goto cleanup; + } + } + /* 3. append 1 and 2 + * { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] } + */ + if (virJSONValueObjectAppend(parent, "volfile-servers", dict_array) < 0) { + virJSONValueFree(dict_array); + goto cleanup; + } + /* 4. assign key to 3 + * { file: { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] } } + */ + if (!(object = virJSONValueNewObject())) + goto cleanup; + if (virJSONValueObjectAppend(object, "file", parent) < 0) { + virJSONValueFree(parent); + goto cleanup; + } + /* 5. append format to 4 + * { file: { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... 
] }, driver:"qcow2" } + */ + if (virJSONValueObjectAdd(object, + "s:driver", virStorageFileFormatTypeToString(src->format), + NULL) < 0) + goto cleanup; + else + /* since we have already captured the format type, let's make it '0' to + * avoid further checks for format information + */ + src->format = 0; + + return object; + +cleanup: + virJSONValueFree(dict_array); + virJSONValueFree(parent); + virJSONValueFree(object); + return NULL; +} + #define QEMU_DEFAULT_NBD_PORT "10809" static char * -qemuBuildNetworkDriveURI(virStorageSourcePtr src, +qemuBuildNetworkDriveStr(virStorageSourcePtr src, const char *username, const char *secret) { char *ret = NULL; + char *str = NULL; virBuffer buf = VIR_BUFFER_INITIALIZER; virURIPtr uri = NULL; + virJSONValuePtr json = NULL; size_t i; switch ((virStorageNetProtocol) src->protocol) { @@ -3331,61 +3421,67 @@ qemuBuildNetworkDriveURI(virStorageSourcePtr src, case VIR_STORAGE_NET_PROTOCOL_TFTP: case VIR_STORAGE_NET_PROTOCOL_ISCSI: case VIR_STORAGE_NET_PROTOCOL_GLUSTER: - if (src->nhosts != 1) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("protocol '%s' accepts only one host"), - virStorageNetProtocolTypeToString(src->protocol)); - goto cleanup; - } - - if (VIR_ALLOC(uri) < 0) - goto cleanup; - - if (src->hosts->transport == VIR_STORAGE_NET_HOST_TRANS_TCP) { - if (VIR_STRDUP(uri->scheme, - virStorageNetProtocolTypeToString(src->protocol)) < 0) + if (src->nhosts == 1) { + /* URI syntax generation */ + if (VIR_ALLOC(uri) < 0) goto cleanup; - } else { - if (virAsprintf(&uri->scheme, "%s+%s", - virStorageNetProtocolTypeToString(src->protocol), - virStorageNetHostTransportTypeToString(src->hosts->transport)) < 0) - goto cleanup; - } - - if ((uri->port = qemuNetworkDriveGetPort(src->protocol, src->hosts->port)) < 0) - goto cleanup; - if (src->path) { - if (src->volume) { - if (virAsprintf(&uri->path, "/%s%s", - src->volume, src->path) < 0) + if (src->hosts->transport == VIR_STORAGE_NET_HOST_TRANS_TCP) { + if (VIR_STRDUP(uri->scheme, + 
virStorageNetProtocolTypeToString(src->protocol)) < 0) goto cleanup; } else { - if (virAsprintf(&uri->path, "%s%s", - src->path[0] == '/' ? "" : "/", - src->path) < 0) + if (virAsprintf(&uri->scheme, "%s+%s", + virStorageNetProtocolTypeToString(src->protocol), + virStorageNetHostTransportTypeToString(src->hosts->transport)) < 0) goto cleanup; } - } - if (src->hosts->socket && - virAsprintf(&uri->query, "socket=%s", src->hosts->socket) < 0) - goto cleanup; + if ((uri->port = qemuNetworkDriveGetPort(src->protocol, src->hosts->port)) < 0) + goto cleanup; - if (username) { - if (secret) { - if (virAsprintf(&uri->user, "%s:%s", username, secret) < 0) - goto cleanup; - } else { - if (VIR_STRDUP(uri->user, username) < 0) - goto cleanup; + if (src->path) { + if (src->volume) { + if (virAsprintf(&uri->path, "/%s%s", + src->volume, src->path) < 0) + goto cleanup; + } else { + if (virAsprintf(&uri->path, "%s%s", + src->path[0] == '/' ? "" : "/", + src->path) < 0) + goto cleanup; + } } - } - if (VIR_STRDUP(uri->server, src->hosts->name) < 0) - goto cleanup; + if (src->hosts->socket && + virAsprintf(&uri->query, "socket=%s", src->hosts->socket) < 0) + goto cleanup; - ret = virURIFormat(uri); + if (username) { + if (secret) { + if (virAsprintf(&uri->user, "%s:%s", username, secret) < 0) + goto cleanup; + } else { + if (VIR_STRDUP(uri->user, username) < 0) + goto cleanup; + } + } + + if (VIR_STRDUP(uri->server, src->hosts->name) < 0) + goto cleanup; + + ret = virURIFormat(uri); + } else { + /* switch to new json formated syntax */ + if(!(json = qemuBuildGlusterDriveJSON(src))) + goto cleanup; + + if (!(str = virJSONValueToString(json, false))) + goto cleanup; + + if (virAsprintf (&ret, "json:%s", str) < 0) + goto cleanup; + } break; @@ -3472,7 +3568,9 @@ qemuBuildNetworkDriveURI(virStorageSourcePtr src, } cleanup: + VIR_FREE(str); virBufferFreeAndReset(&buf); + virJSONValueFree(json); virURIFree(uri); return ret; @@ -3530,7 +3628,7 @@ qemuGetDriveSourceString(virStorageSourcePtr 
src, break; case VIR_STORAGE_TYPE_NETWORK: - if (!(*source = qemuBuildNetworkDriveURI(src, username, secret))) + if (!(*source = qemuBuildNetworkDriveStr(src, username, secret))) goto cleanup; break; @@ -6391,7 +6489,7 @@ qemuBuildSCSIiSCSIHostdevDrvStr(virConnectPtr conn, src.nhosts = iscsisrc->nhosts; /* Rather than pull what we think we want - use the network disk code */ - source = qemuBuildNetworkDriveURI(&src, username, secret); + source = qemuBuildNetworkDriveStr(&src, username, secret); cleanup: VIR_FREE(secret); -- 2.1.0

On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
currently libvirt has the capability to parse only one host and convert that into URI formatted string, with the help of this patch libvirt will be able to parse multiple hosts from the domain xml and can convert that into JSON formatted string
before: ------ example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24007' transport="tcp"/> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ...
resultant string: file=gluster://1.2.3.4:24007/testvol/a.qcow2, \ if=none,id=drive-virtio-disk0,format=qcow2,cache=none
after: ----- example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24009' transport="tcp"/> <host name='3.4.5.6' port="24008"/> <host name='5.6.7.8' /> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ...
resultant string: -drive file=json:{
"file": { "driver": "gluster",, "volname": "testvol",, "image-path": "/a.qcow2",, "volfile-servers": [ { "server": "1.2.3.4",, "port": 24009,, "transport": "tcp" },, { "server": "3.4.5.6",, "port": 24008,, "transport": "tcp" },, { "server": "5.6.7.8",, "port": 24007,,
The double commas look like a result of our command line escaping function. Are they actually required with 'json:' sources? If not, we will probably need a way to avoid them.
"transport": "tcp" } ] },, "driver": "qcow2" } ,if=none,id=drive-virtio-disk0,cache=none
if the number of hosts supplied is only one, then we use existing URI format for backward compatibility and if number of hosts is greater than one we switch to the new JSON format
this patch requires qemu latest patch https://lists.gnu.org/archive/html/qemu-devel/2015-09/msg07062.html
So this patch, if it is acked, needs to wait until qemu accepts the patch as final.
Credits: Sincere thanks to Kevin Wolf <kwolf@redhat.com> and "Deepak C Shetty" <deepakcs@redhat.com> for their support
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> --- src/qemu/qemu_command.c | 192 ++++++++++++++++++++++++++++++++++++------------ 1 file changed, 145 insertions(+), 47 deletions(-)
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c index bb1835c..8c1bf1a 100644 --- a/src/qemu/qemu_command.c +++ b/src/qemu/qemu_command.c @@ -3256,16 +3256,106 @@ qemuNetworkDriveGetPort(int protocol, return -1; }
+#define QEMU_DEFAULT_GLUSTER_PORT 24007
qemuNetworkDriveGetPort can be possibly improved to include this too, instead of the define ... [1]
+ +static virJSONValuePtr +qemuBuildGlusterDriveJSON(virStorageSourcePtr src) +{ + virJSONValuePtr parent = NULL; + virJSONValuePtr object = NULL; + virJSONValuePtr dict_array = NULL; + int port; + int i;
This won't pass syntax-check. Please make sure that you'll run it before posting patches.
+ + if (!(parent = virJSONValueNewObject())) + return NULL; + /* 1. prepare { driver:"gluster", volname:"testvol", image-path:"/a.img"} */ + if (virJSONValueObjectAdd(parent, + "s:driver", virStorageNetProtocolTypeToString(src->protocol), + "s:volname", src->volume, + "s:image-path", src->path, + NULL) < 0) + goto cleanup; + + if (!(dict_array = virJSONValueNewArray())) + goto cleanup; + /* 2. prepare array [ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] */ + for (i = 0; i < src->nhosts; i++) { + if (!(object = virJSONValueNewObject())) + goto cleanup; + port = qemuNetworkDriveGetPort(src->protocol, src->hosts[i].port); + if (virJSONValueObjectAdd(object, "s:server", src->hosts[i].name, + "i:port", port ? port : QEMU_DEFAULT_GLUSTER_PORT ,
[1] and an ugly ternary operator.
+ "s:transport", + virStorageNetHostTransportTypeToString(src->hosts[i].transport), + NULL) < 0) + goto cleanup; + if (virJSONValueArrayAppend(dict_array, object) < 0) { + virJSONValueFree(object);
The above statement is already in the cleanup section.
+ goto cleanup; + } + } + /* 3. append 1 and 2 + * { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] } + */ + if (virJSONValueObjectAppend(parent, "volfile-servers", dict_array) < 0) { + virJSONValueFree(dict_array);
The above statement is already in the cleanup section.
+ goto cleanup; + } + /* 4. assign key to 3 + * { file: { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] } } + */ + if (!(object = virJSONValueNewObject())) + goto cleanup; + if (virJSONValueObjectAppend(object, "file", parent) < 0) { + virJSONValueFree(parent);
Same thing.
+ goto cleanup; + } + /* 5. append format to 4 + * { file: { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] }, driver:"qcow2" } + */ + if (virJSONValueObjectAdd(object, + "s:driver", virStorageFileFormatTypeToString(src->format), + NULL) < 0) + goto cleanup; + else + /* since we have already captured the format type, let's make it '0' to + * avoid further checks for format information
NACK to this, this would remove it from the live definition and it would disappear from the live XML. That can't happen. Various block job operations and a lot of other code depend on knowing the format. Also this violates the code style since the "body" of the else statement is multi-line due to the comment so both paths need a block. Also, since the true part jumps to 'cleanup' you can remove else completely to make the code more unambiguous.
+ */ + src->format = 0; + + return object; + +cleanup: + virJSONValueFree(dict_array); + virJSONValueFree(parent); + virJSONValueFree(object); + return NULL; +} + #define QEMU_DEFAULT_NBD_PORT "10809"
static char * -qemuBuildNetworkDriveURI(virStorageSourcePtr src, +qemuBuildNetworkDriveStr(virStorageSourcePtr src, const char *username, const char *secret) { char *ret = NULL; + char *str = NULL; virBuffer buf = VIR_BUFFER_INITIALIZER; virURIPtr uri = NULL; + virJSONValuePtr json = NULL; size_t i;
switch ((virStorageNetProtocol) src->protocol) { @@ -3331,61 +3421,67 @@ qemuBuildNetworkDriveURI(virStorageSourcePtr src, case VIR_STORAGE_NET_PROTOCOL_TFTP: case VIR_STORAGE_NET_PROTOCOL_ISCSI: case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
So this is done for all protocols that originally use an URI ...
- if (src->nhosts != 1) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("protocol '%s' accepts only one host"), - virStorageNetProtocolTypeToString(src->protocol)); - goto cleanup; - } - - if (VIR_ALLOC(uri) < 0) - goto cleanup; - - if (src->hosts->transport == VIR_STORAGE_NET_HOST_TRANS_TCP) { - if (VIR_STRDUP(uri->scheme, - virStorageNetProtocolTypeToString(src->protocol)) < 0) + if (src->nhosts == 1) { + /* URI syntax generation */ + if (VIR_ALLOC(uri) < 0) goto cleanup; - } else { - if (virAsprintf(&uri->scheme, "%s+%s", - virStorageNetProtocolTypeToString(src->protocol), - virStorageNetHostTransportTypeToString(src->hosts->transport)) < 0) - goto cleanup; - } - - if ((uri->port = qemuNetworkDriveGetPort(src->protocol, src->hosts->port)) < 0) - goto cleanup;
- if (src->path) { - if (src->volume) { - if (virAsprintf(&uri->path, "/%s%s", - src->volume, src->path) < 0) + if (src->hosts->transport == VIR_STORAGE_NET_HOST_TRANS_TCP) { + if (VIR_STRDUP(uri->scheme, + virStorageNetProtocolTypeToString(src->protocol)) < 0) goto cleanup; } else { - if (virAsprintf(&uri->path, "%s%s", - src->path[0] == '/' ? "" : "/", - src->path) < 0) + if (virAsprintf(&uri->scheme, "%s+%s", + virStorageNetProtocolTypeToString(src->protocol), + virStorageNetHostTransportTypeToString(src->hosts->transport)) < 0) goto cleanup; } - }
- if (src->hosts->socket && - virAsprintf(&uri->query, "socket=%s", src->hosts->socket) < 0) - goto cleanup; + if ((uri->port = qemuNetworkDriveGetPort(src->protocol, src->hosts->port)) < 0) + goto cleanup;
- if (username) { - if (secret) { - if (virAsprintf(&uri->user, "%s:%s", username, secret) < 0) - goto cleanup; - } else { - if (VIR_STRDUP(uri->user, username) < 0) - goto cleanup; + if (src->path) { + if (src->volume) { + if (virAsprintf(&uri->path, "/%s%s", + src->volume, src->path) < 0) + goto cleanup; + } else { + if (virAsprintf(&uri->path, "%s%s", + src->path[0] == '/' ? "" : "/", + src->path) < 0) + goto cleanup; + } } - }
- if (VIR_STRDUP(uri->server, src->hosts->name) < 0) - goto cleanup; + if (src->hosts->socket && + virAsprintf(&uri->query, "socket=%s", src->hosts->socket) < 0) + goto cleanup;
- ret = virURIFormat(uri); + if (username) { + if (secret) { + if (virAsprintf(&uri->user, "%s:%s", username, secret) < 0) + goto cleanup; + } else { + if (VIR_STRDUP(uri->user, username) < 0) + goto cleanup; + } + } + + if (VIR_STRDUP(uri->server, src->hosts->name) < 0) + goto cleanup; + + ret = virURIFormat(uri); + } else { + /* switch to new json formated syntax */
... but you do Gluster here unconditionally. This would actually turn a HTTP (or other) disk definition into a gluster source. That can't happen, this either needs to go into a separate section under the VIR_STORAGE_NET_PROTOCOL_GLUSTER or the JSON generator needs to be able to generate json for all the protocols.
+ if(!(json = qemuBuildGlusterDriveJSON(src))) + goto cleanup; + + if (!(str = virJSONValueToString(json, false))) + goto cleanup; + + if (virAsprintf (&ret, "json:%s", str) < 0) + goto cleanup; + }
break;
So while all the above looks pretty okay at this point (except for clearing the image type) this patch is incomplete. Since libvirt is loading the backing chain to be able to traverse it and set correct permissions on individual images, this amounts to two additional parts of the code that need modification: 1) virStorageFileBackendGlusterInit has the same "src->nhosts != 1" condition and is not prepared to work with multiple protocols 2) I presume (I didn't test it yet) that the qemu patch that adds this stuff will result in using the "json:{" protocol string in the 'backing_store' field once you do a snapshot of a gluster disk that uses this new syntax. If that happens the VM will not be able to start in libvirt any more, as libvirt does not have a 'json:{' protocol parser to parse the backing store. So in order to accept this patch, the helpers in src/util/virstoragefile.c (virStorageSourceParseBackingURI) will need to be made modular so that they can accept a driver specific callback, that will then parse the 'json:{' source definitions. (We can't just hardcode it there, since src/util "should" not contain any qemu specific code.) Peter

On Mon, Oct 05, 2015 at 13:01:27 +0200, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
...
+ int port; + int i;
This won't pass syntax-check. Please make sure that you'll run it before posting patches.
+
Here are the problems pointed out by running 'make syntax-check' on this patch: src/qemu/qemu_command.c:3340:cleanup: maint.mk: Top-level labels should be indented by one space cfg.mk:917: recipe for target 'sc_require_space_before_label' failed src/qemu/qemu_command.c:3268: int i; maint.mk: use size_t, not int/unsigned int for loop vars i, j, k cfg.mk:574: recipe for target 'sc_prohibit_int_ijk' failed Whitespace before (semi)colon: src/qemu/qemu_command.c:3290: "i:port", port ? port : QEMU_DEFAULT_GLUSTER_PORT , No whitespace after keyword: src/qemu/qemu_command.c:3476: if(!(json = qemuBuildGlusterDriveJSON(src))) Whitespace after non-keyword: src/qemu/qemu_command.c:3482: if (virAsprintf (&ret, "json:%s", str) < 0) maint.mk: incorrect formatting, see HACKING for rules cfg.mk:1065: recipe for target 'bracket-spacing-check' failed Peter

Am 05.10.2015 um 13:01 hat Peter Krempa geschrieben:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
currently libvirt has the capability to parse only one host and convert that into URI formatted string, with the help of this patch libvirt will be able to parse multiple hosts from the domain xml and can convert that into JSON formatted string
before: ------ example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24007' transport="tcp"/> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ...
resultant string: file=gluster://1.2.3.4:24007/testvol/a.qcow2, \ if=none,id=drive-virtio-disk0,format=qcow2,cache=none
after: ----- example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24009' transport="tcp"/> <host name='3.4.5.6' port="24008"/> <host name='5.6.7.8' /> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ...
resultant string: -drive file=json:{
"file": { "driver": "gluster",, "volname": "testvol",, "image-path": "/a.qcow2",, "volfile-servers": [ { "server": "1.2.3.4",, "port": 24009,, "transport": "tcp" },, { "server": "3.4.5.6",, "port": 24008,, "transport": "tcp" },, { "server": "5.6.7.8",, "port": 24007,,
The double commas look like a result of our command line escaping function. Are they actually required with 'json:' sources? If no, we will need probably a way to avoid them.
This looks like it's used on the command line (i.e. the file=... parameter of -drive). In this case, the escaping is necessary so the string isn't split in the middle of the JSON object. If you used the same thing in a blockdev-add QMP command, you wouldn't need escaping, obviously, and double commas would be a syntax error. It's possible to avoid JSON syntax on the command line, but as you mentioned yourself below, it's necessary in other contexts (backing file strings), so it probably makes sense to use it here as well. For the record, the version without JSON would look like this (without the whitespace; only formatting it this way for readability): -drive file.driver=gluster, file.volname=testvol, file.image-path=/a.qcow2, file.volfile-servers.0.server=1.2.3.4, file.volfile-servers.0.port=24009, ...
"transport": "tcp" } ] },, "driver": "qcow2" } ,if=none,id=drive-virtio-disk0,cache=none
if the number of hosts supplied is only one, then we use existing URI format for backward compatibility and if number of hosts is greater than one we switch to the new JSON format
this patch requires qemu latest patch https://lists.gnu.org/archive/html/qemu-devel/2015-09/msg07062.html
So this patch, if it will be acked needs to wait until qemu accepts the patch as final.
Yes, definitely.
Credits: Sincere thanks to Kevin Wolf <kwolf@redhat.com> and "Deepak C Shetty" <deepakcs@redhat.com> for their support
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
[...]
Since libvirt is loading the backing chain to be able to traverse it and set correct permissions on individual images. This equals to two additional parts of the code that need modification:
1) virStorageFileBackendGlusterInit has the same "src->nhosts != 1" condition and is not prepared to work with multiple protocols
2) I presume (I didn't test it yet) that the qemu patch that adds this stuff will result into using the "json:{" protocol string in the 'backing_store' field once you do a snapshot of a gluster disk that uses this new syntax. If that happens the VM will not be able to start in libvirt any more, as libvirt does not have a 'json:{' protocol parser to parse the backing store.
Yes, the json: pseudo-protocol is the only way to put structured options into the backing file string that image formats provide. If qemu can't generate a simpler filename string (like a URI) that contains all of the necessary information, this is what it puts there. Kevin

On Mon, Oct 05, 2015 at 14:51:00 +0200, Kevin Wolf wrote:
Am 05.10.2015 um 13:01 hat Peter Krempa geschrieben:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
...
resultant string: -drive file=json:{
"file": { "driver": "gluster",, "volname": "testvol",, "image-path": "/a.qcow2",, "volfile-servers": [ { "server": "1.2.3.4",, "port": 24009,, "transport": "tcp" },, { "server": "3.4.5.6",, "port": 24008,, "transport": "tcp" },, { "server": "5.6.7.8",, "port": 24007,,
The double commas look like a result of our command line escaping function. Are they actually required with 'json:' sources? If no, we will need probably a way to avoid them.
This looks like it's used on the command line (i.e. the file=... parameter of -drive). In this case, the escaping is necessary so the string isn't split in the middle of the JSON object.
Okay, so then our shell escaper is doing the right thing here.
If you used the same thing in a blockdev-add QMP command, you wouldn't need escaping, obviously, and double commas would be a syntax error.
It's possible to avoid JSON syntax on the command line, but as you mentioned yourself below, it's necessary in other contexts (backing file strings), so it probably makes sense to use it here as well.
For the record, the version without JSON would look like this (without the whitespace; only formatting it this way for readability):
-drive file.driver=gluster, file.volname=testvol, file.image-path=/a.qcow2, file.volfile-servers.0.server=1.2.3.4, file.volfile-servers.0.port=24009,
It looks like the syntax above can very well be generated from the same data structures we use to generate the JSON code. That was also my original thought on how I'd approach this. On the command line I'd use the syntax above via a separate generator and on the monitor we can go with json. This will allow us to avoid the ugliness of json on the command line and the output might perhaps be even shorter. At any rate we definitely need a parser and a formatter for the json options. Peter

Am 05.10.2015 um 15:01 hat Peter Krempa geschrieben:
On Mon, Oct 05, 2015 at 14:51:00 +0200, Kevin Wolf wrote:
Am 05.10.2015 um 13:01 hat Peter Krempa geschrieben:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
...
resultant string: -drive file=json:{
"file": { "driver": "gluster",, "volname": "testvol",, "image-path": "/a.qcow2",, "volfile-servers": [ { "server": "1.2.3.4",, "port": 24009,, "transport": "tcp" },, { "server": "3.4.5.6",, "port": 24008,, "transport": "tcp" },, { "server": "5.6.7.8",, "port": 24007,,
The double commas look like a result of our command line escaping function. Are they actually required with 'json:' sources? If no, we will need probably a way to avoid them.
This looks like it's used on the command line (i.e. the file=... parameter of -drive). In this case, the escaping is necessary so the string isn't split in the middle of the JSON object.
Okay, so then our shell escaper is doing the right thing here.
If you used the same thing in a blockdev-add QMP command, you wouldn't need escaping, obviously, and double commas would be a syntax error.
It's possible to avoid JSON syntax on the command line, but as you mentioned yourself below, it's necessary in other contexts (backing file strings), so it probably makes sense to use it here as well.
For the record, the version without JSON would look like this (without the whitespace; only formatting it this way for readability):
-drive file.driver=gluster, file.volname=testvol, file.image-path=/a.qcow2, file.volfile-servers.0.server=1.2.3.4, file.volfile-servers.0.port=24009,
It looks like that the syntax above can be very well generated from the same data structures we use to generate the JSON code. That was also my original thought how I'd approach this. On the commandline I'd use the syntax above via a separate generator and on monitor we can go with json. This will allow us to avoid the uglyness of json on the commannd line and the output might perhaps be even shorter.
No, most certainly it won't be shorter. The biggest drawback of the dot syntax is that it repeats long prefixes all the time, so contrary to what you might intuitively think, reading a command line might actually become harder for humans (especially if options aren't ordered) than with JSON syntax, which at least keeps all fields of a single object in the same place. As far as qemu is concerned, you're free to choose whatever you prefer, though. The first thing that is done with json: filenames is that they are extracted as if they were separately specified options, so there's no semantic difference.
At any rate we definitely need a parser and a formatter for the json options.
You do. :-) Kevin

On Mon, Oct 05, 2015 at 15:20:58 +0200, Kevin Wolf wrote:
Am 05.10.2015 um 15:01 hat Peter Krempa geschrieben:
On Mon, Oct 05, 2015 at 14:51:00 +0200, Kevin Wolf wrote:
Am 05.10.2015 um 13:01 hat Peter Krempa geschrieben:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
...
-drive file.driver=gluster, file.volname=testvol, file.image-path=/a.qcow2, file.volfile-servers.0.server=1.2.3.4, file.volfile-servers.0.port=24009,
It looks like that the syntax above can be very well generated from the same data structures we use to generate the JSON code. That was also my original thought how I'd approach this. On the commandline I'd use the syntax above via a separate generator and on monitor we can go with json. This will allow us to avoid the uglyness of json on the commannd line and the output might perhaps be even shorter.
No, most certainly it won't be shorter.
The biggest drawback of the dot syntax is that it repeats long prefixes all the time, so contrary to what you might intuitively think, for humans reading a command line might actually become harder (especially if options aren't ordered) than with JSON syntax, which at least keeps all fields of a single object in the same place.
I got carried away by the pretty-formatted json output. You are right, the output will certainly be longer.
As far as qemu is concerned, you're free to choose whatever you prefer, though. The first thing that is done with json: filenames is that they are extracted as if they were separatly specified options, so there's no semantic difference.
Okay, I'll probably decide arbitrarily by looking at the resulting strings later; this certainly isn't a pressing issue in this series.
At any rate we definitely need a parser and a formatter for the json options.
You do. :-)
This one is. Peter

On 10/05/2015 04:31 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
currently libvirt has the capability to parse only one host and convert that into URI formatted string, with the help of this patch libvirt will be able to parse multiple hosts from the domain xml and can convert that into JSON formatted string
before: ------ example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24007' transport="tcp"/> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ...
resultant string: file=gluster://1.2.3.4:24007/testvol/a.qcow2, \ if=none,id=drive-virtio-disk0,format=qcow2,cache=none
after: ----- example.xml: ... <disk type='network' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source protocol='gluster' name='testvol/a.qcow2'> <host name='1.2.3.4' port='24009' transport="tcp"/> <host name='3.4.5.6' port="24008"/> <host name='5.6.7.8' /> </source> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> ...
resultant string: -drive file=json:{ "file": { "driver": "gluster",, "volname": "testvol",, "image-path": "/a.qcow2",, "volfile-servers": [ { "server": "1.2.3.4",, "port": 24009,, "transport": "tcp" },, { "server": "3.4.5.6",, "port": 24008,, "transport": "tcp" },, { "server": "5.6.7.8",, "port": 24007,, The double commas look like a result of our command line escaping function. Are they actually required with 'json:' sources? If no, we will need probably a way to avoid them.
"transport": "tcp" } ] },, "driver": "qcow2" } ,if=none,id=drive-virtio-disk0,cache=none
if the number of hosts supplied is only one, then we use existing URI format for backward compatibility and if number of hosts is greater than one we switch to the new JSON format
this patch requires qemu latest patch https://lists.gnu.org/archive/html/qemu-devel/2015-09/msg07062.html
So this patch, if it will be acked needs to wait until qemu accepts the patch as final.
Credits: Sincere thanks to Kevin Wolf <kwolf@redhat.com> and "Deepak C Shetty" <deepakcs@redhat.com> for their support
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> --- src/qemu/qemu_command.c | 192 ++++++++++++++++++++++++++++++++++++------------ 1 file changed, 145 insertions(+), 47 deletions(-)
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c index bb1835c..8c1bf1a 100644 --- a/src/qemu/qemu_command.c +++ b/src/qemu/qemu_command.c @@ -3256,16 +3256,106 @@ qemuNetworkDriveGetPort(int protocol, return -1; }
+#define QEMU_DEFAULT_GLUSTER_PORT 24007 qemuNetworkDriveGetPort can be possibly improved to include this too, instead of the define ... [1]
+ +static virJSONValuePtr +qemuBuildGlusterDriveJSON(virStorageSourcePtr src) +{ + virJSONValuePtr parent = NULL; + virJSONValuePtr object = NULL; + virJSONValuePtr dict_array = NULL; + int port; + int i; This won't pass syntax-check. Please make sure that you'll run it before posting patches.
+ + if (!(parent = virJSONValueNewObject())) + return NULL; + /* 1. prepare { driver:"gluster", volname:"testvol", image-path:"/a.img"} */ + if (virJSONValueObjectAdd(parent, + "s:driver", virStorageNetProtocolTypeToString(src->protocol), + "s:volname", src->volume, + "s:image-path", src->path, + NULL) < 0) + goto cleanup; + + if (!(dict_array = virJSONValueNewArray())) + goto cleanup; + /* 2. prepare array [ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] */ + for (i = 0; i < src->nhosts; i++) { + if (!(object = virJSONValueNewObject())) + goto cleanup; + port = qemuNetworkDriveGetPort(src->protocol, src->hosts[i].port); + if (virJSONValueObjectAdd(object, "s:server", src->hosts[i].name, + "i:port", port ? port : QEMU_DEFAULT_GLUSTER_PORT , [1] and a ugly ternary operator.
+ "s:transport", + virStorageNetHostTransportTypeToString(src->hosts[i].transport), + NULL) < 0) + goto cleanup; + if (virJSONValueArrayAppend(dict_array, object) < 0) { + virJSONValueFree(object); The above statement is already in the cleanup section.
+ goto cleanup; + } + } + /* 3. append 1 and 2 + * { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] } + */ + if (virJSONValueObjectAppend(parent, "volfile-servers", dict_array) < 0) { + virJSONValueFree(dict_array); The above statement is already in the cleanup section.
+ goto cleanup; + } + /* 4. assign key to 3 + * { file: { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] } } + */ + if (!(object = virJSONValueNewObject())) + goto cleanup; + if (virJSONValueObjectAppend(object, "file", parent) < 0) { + virJSONValueFree(parent); Same thing.
+ goto cleanup; + } + /* 5. append format to 4 + * { file: { driver:"gluster", volname:"testvol", image-path:"/a.img", + * volfile-servers :[ {server:"1.2.3.4", port:24007, transport:"tcp"} , + * {server:"5.6.7.8", port:24008, transport:"rdma"}, + * {}, ... ] }, driver:"qcow2" } + */ + if (virJSONValueObjectAdd(object, + "s:driver", virStorageFileFormatTypeToString(src->format), + NULL) < 0) + goto cleanup; + else + /* since we have already captured the format type, let's make it '0' to + * avoid further checks for format information NACK to this, this would remove it from the live definition and it would disappear from the live XML. That can't happen. Various block job operations and a lot of other code depend on knowing the format.
Also this violates the code style since the "body" of the else statement is multi-line due to the comment so both paths need a block. Also, since the true part jumps to 'cleanup' you can remove else completely to make the code more unambiguous.
+ */ + src->format = 0; + + return object; + +cleanup: + virJSONValueFree(dict_array); + virJSONValueFree(parent); + virJSONValueFree(object); + return NULL; +} + #define QEMU_DEFAULT_NBD_PORT "10809"
static char * -qemuBuildNetworkDriveURI(virStorageSourcePtr src, +qemuBuildNetworkDriveStr(virStorageSourcePtr src, const char *username, const char *secret) { char *ret = NULL; + char *str = NULL; virBuffer buf = VIR_BUFFER_INITIALIZER; virURIPtr uri = NULL; + virJSONValuePtr json = NULL; size_t i;
switch ((virStorageNetProtocol) src->protocol) { @@ -3331,61 +3421,67 @@ qemuBuildNetworkDriveURI(virStorageSourcePtr src, case VIR_STORAGE_NET_PROTOCOL_TFTP: case VIR_STORAGE_NET_PROTOCOL_ISCSI: case VIR_STORAGE_NET_PROTOCOL_GLUSTER: So this is done for all protocols that originally use an URI ...
- if (src->nhosts != 1) { - virReportError(VIR_ERR_INTERNAL_ERROR, - _("protocol '%s' accepts only one host"), - virStorageNetProtocolTypeToString(src->protocol)); - goto cleanup; - } - - if (VIR_ALLOC(uri) < 0) - goto cleanup; - - if (src->hosts->transport == VIR_STORAGE_NET_HOST_TRANS_TCP) { - if (VIR_STRDUP(uri->scheme, - virStorageNetProtocolTypeToString(src->protocol)) < 0) + if (src->nhosts == 1) { + /* URI syntax generation */ + if (VIR_ALLOC(uri) < 0) goto cleanup; - } else { - if (virAsprintf(&uri->scheme, "%s+%s", - virStorageNetProtocolTypeToString(src->protocol), - virStorageNetHostTransportTypeToString(src->hosts->transport)) < 0) - goto cleanup; - } - - if ((uri->port = qemuNetworkDriveGetPort(src->protocol, src->hosts->port)) < 0) - goto cleanup;
- if (src->path) { - if (src->volume) { - if (virAsprintf(&uri->path, "/%s%s", - src->volume, src->path) < 0) + if (src->hosts->transport == VIR_STORAGE_NET_HOST_TRANS_TCP) { + if (VIR_STRDUP(uri->scheme, + virStorageNetProtocolTypeToString(src->protocol)) < 0) goto cleanup; } else { - if (virAsprintf(&uri->path, "%s%s", - src->path[0] == '/' ? "" : "/", - src->path) < 0) + if (virAsprintf(&uri->scheme, "%s+%s", + virStorageNetProtocolTypeToString(src->protocol), + virStorageNetHostTransportTypeToString(src->hosts->transport)) < 0) goto cleanup; } - }
- if (src->hosts->socket && - virAsprintf(&uri->query, "socket=%s", src->hosts->socket) < 0) - goto cleanup; + if ((uri->port = qemuNetworkDriveGetPort(src->protocol, src->hosts->port)) < 0) + goto cleanup;
- if (username) { - if (secret) { - if (virAsprintf(&uri->user, "%s:%s", username, secret) < 0) - goto cleanup; - } else { - if (VIR_STRDUP(uri->user, username) < 0) - goto cleanup; + if (src->path) { + if (src->volume) { + if (virAsprintf(&uri->path, "/%s%s", + src->volume, src->path) < 0) + goto cleanup; + } else { + if (virAsprintf(&uri->path, "%s%s", + src->path[0] == '/' ? "" : "/", + src->path) < 0) + goto cleanup; + } } - }
- if (VIR_STRDUP(uri->server, src->hosts->name) < 0) - goto cleanup; + if (src->hosts->socket && + virAsprintf(&uri->query, "socket=%s", src->hosts->socket) < 0) + goto cleanup;
- ret = virURIFormat(uri); + if (username) { + if (secret) { + if (virAsprintf(&uri->user, "%s:%s", username, secret) < 0) + goto cleanup; + } else { + if (VIR_STRDUP(uri->user, username) < 0) + goto cleanup; + } + } + + if (VIR_STRDUP(uri->server, src->hosts->name) < 0) + goto cleanup; + + ret = virURIFormat(uri); + } else { + /* switch to new json formated syntax */ ... but you do Gluster here unconditionally. This would actually turn a HTTP (or other) disk definition into a gluster source. That can't happen, this either needs to go into a separate section under the VIR_STORAGE_NET_PROTOCOL_GLUSTER or the JSON generator needs to be able to generate json for all the protocols.
+ if(!(json = qemuBuildGlusterDriveJSON(src))) + goto cleanup; + + if (!(str = virJSONValueToString(json, false))) + goto cleanup; + + if (virAsprintf (&ret, "json:%s", str) < 0) + goto cleanup; + }
break;
So while all the above looks pretty okay at this point (except for clearing the image type) this patch is incomplete.
Since libvirt is loading the backing chain to be able to traverse it and set correct permissions on individual images. This equals to two additional parts of the code that need modification:
1) virStorageFileBackendGlusterInit has the same "src->nhosts != 1" condition and is not prepared to work with multiple protocols
IIUC this is part of the storage pool feature of libvirt and that can be handled as part of a separate patch. Using gluster as a block backend for qemu doesn't enforce using gluster as a file backend for libvirt storage pool, they both are separate, IMHO
2) I presume (I didn't test it yet) that the qemu patch that adds this stuff will result into using the "json:{" protocol string in the 'backing_store' field once you do a snapshot of a gluster disk that uses this new syntax. If that happens the VM will not be able to start in libvirt any more, as libvirt does not have a 'json:{' protocol parser to parse the backing store.
We tried to take a disk-only external snapshot with libvirtd running with this patch and I was unable to see the "json:{..." format for backing file. I think we understand what you are saying, but we are unable to reproduce the problem ( of having the json way in backing file). This is what we did .... * Domain HFM running with CentOs7.qcow2 as a network disk type w/ protocol=gluster 0 :) dhcp1-86 ~/deepakcs $ virsh list --all Id Name State ---------------------------------------------------- 4 HFM running * Create a qcow2 file for use with external snapshot qemu-img create -f qcow2 -b gluster://10.70.1.86/sample/CentOs7.qcow2 gluster://10.70.1.86/sample/newsnap.qcow2 * Create a domainsnapshot XML (snap.xml) <domainsnapshot> <disks> <disk name="vda" snapshot="external" type="network"> <source protocol="gluster" name="sample/newsnap.qcow2"> <host name="10.70.1.86" port="24007"/> <host name="10.70.1.88" port="24007"/> <host name="10.70.1.87" port="24007"/> </source> </disk> </disks> </domainsnapshot> * Run virsh snap cmd (error in bold) 0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: missing storage backend for network files using gluster protocol* * Run virsh snap cmd w/ domain is off (error in bold) 0 :) dhcp1-86 ~/deepakcs $ virsh list --all Id Name State ---------------------------------------------------- - HFM shut off 0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: external inactive snapshots are not supported on 'network' disks using 'gluster' protocol*
So in order to accept this patch, the helpers in src/util/virstoragefile.c (virStorageSourceParseBackingURI) will need to be made modular so that they can accept a driver specific callback, that will then parse the 'json:{' source definitions. (We can't just hardcode it there, since src/util "should" not contain any qemu specific code.)
Questions: 1) Why doesn't the snapshot work — is it because the support for json is not present in the backing-file parse code? The error msg seen above isn't clear 2) IIUC, you mean that the backing file will be in the json format something like ... backing file: json"{...}" in `qemu-img info --backing-chain` output ? How to create such a scenario, so that we can "see" the problem happening and hence debug/fix it :) thanx, deepak

On Mon, Oct 05, 2015 at 18:59:54 +0530, Deepak C Shetty wrote:
[Note: Formatting of this mail is broken. Please configure your mail client to properly flow text in the plaintext mode.]
On 10/05/2015 04:31 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
[snip]
So while all the above looks pretty okay at this point (except for clearing the image type) this patch is incomplete.
Since libvirt is loading the backing chain to be able to traverse it and set correct permissions on individual images. This equals to two additional parts of the code that need modification:
1) virStorageFileBackendGlusterInit has the same "src->nhosts != 1" condition and is not prepared to work with multiple protocols
IIUC this is part of the storage pool feature of libvirt and that can be handled as part of a separate patch. Using gluster as a block backend for qemu doesn't enforce using gluster as a file backend for libvirt storage pool, they both are separate, IMHO
No, the part of the storage driver I'm referring to is used in the backing chain detection. So even if you don't use the libvirt's gluster storage pool, the code I'm referring to will be invoked. It can be a separate patch, but it needs to go in before this patch lands.
2) I presume (I didn't test it yet) that the qemu patch that adds this stuff will result into using the "json:{" protocol string in the 'backing_store' field once you do a snapshot of a gluster disk that uses this new syntax. If that happens the VM will not be able to start in libvirt any more, as libvirt does not have a 'json:{' protocol parser to parse the backing store.
We tried to take a disk-only external snapshot with libvirtd running with this patch and I was unable to see the "json:{..." format for backing file.
I think we understand what you are saying, but we are unable to reproduce the problem ( of having the json way in backing file). This is what we did ....
* Domain HFM running with CentOs7.qcow2 as a network disk type w/ protocol=gluster
0 :) dhcp1-86 ~/deepakcs $ virsh list --all Id Name State ---------------------------------------------------- 4 HFM running
* Create a qcow2 file for use with external snapshot
qemu-img create -f qcow2 -b gluster://10.70.1.86/sample/CentOs7.qcow2 gluster://10.70.1.86/sample/newsnap.qcow2
If you pre-create backing files to use with --reuse-external, you have to use the json syntax here to express the backing file.
* Create a domainsnapshot XML (snap.xml)
<domainsnapshot> <disks> <disk name="vda" snapshot="external" type="network"> <source protocol="gluster" name="sample/newsnap.qcow2"> <host name="10.70.1.86" port="24007"/> <host name="10.70.1.88" port="24007"/> <host name="10.70.1.87" port="24007"/> </source> </disk> </disks> </domainsnapshot>
You are doing it the other way around. I was speaking of doing a VM started with a multi-server gluster volume and then doing a snapshot wherever you like. This should result in a snapshot that has json definition of the backing file.
* Run virsh snap cmd (error in bold)
[Note: using HTML mails on technical lists is not a good idea.]
0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: missing storage backend for network files using gluster protocol*
Erm, how are you attempting to test this if you don't even have libvirt with gluster support installed?
* Run virsh snap cmd w/ domain is off (error in bold)
0 :) dhcp1-86 ~/deepakcs $ virsh list --all Id Name State ---------------------------------------------------- - HFM shut off
0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: external inactive snapshots are not supported on 'network' disks using 'gluster' protocol*
This is expected, inactive external snapshots have only a very limited set of supported configurations. Networked storage is not there.
So in order to accept this patch, the helpers in src/util/virstoragefile.c (virStorageSourceParseBackingURI) will need to be made modular so that they can accept a driver specific callback, that will then parse the 'json:{' source definitions. (We can't just hardcode it there, since src/util "should" not contain any qemu specific code.)
Questions: 1) Why doesn't the snapshot work — is it because the support for json is not present in the backing-file parse code? The error msg seen above isn't clear
Well the error message says that you have libvirt without gluster support and thus it refuses to do a snapshot on a gluster disk.
2) IIUC, you mean that the backing file will be in the json format something like ... backing file: json"{...}" in `qemu-img info --backing-chain` output ?
Exactly.
How to create such a scenario, so that we can "see" the problem happening and hence debug/fix it :)
Start a VM with the multi-host gluster support (as of by this patch) and create an external snapshot even on local disk, no need to go to gluster again. Peter

On 10/05/2015 07:33 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 18:59:54 +0530, Deepak C Shetty wrote: [Note: Formatting of this mail is broken. Please configure your mail client to properly flow text in the plaintext mode.]
Sorry about that. I configured plain text format for libvir-list@redhat.com domain in my thunderbird client and selected plain text as delivery format for this specific mail.
On 10/05/2015 04:31 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote: [snip]
So while all the above looks pretty okay at this point (except for clearing the image type) this patch is incomplete.
Since libvirt is loading the backing chain to be able to traverse it and set correct permissions on individual images. This equals to two additional parts of the code that need modification:
1) virStorageFileBackendGlusterInit has the same "src->nhosts != 1" condition and is not prepared to work with multiple protocols IIUC this is part of the storage pool feature of libvirt and that can be handled as part of a separate patch. Using gluster as a block backend for qemu doesn't enforce using gluster as a file backend for libvirt storage pool, they both are separate, IMHO No, the part of the storage driver I'm referring to is used in the backing chain detection. So even if you don't use the libvirt's gluster storage pool, the code I'm referring to will be invoked.
It can be a separate patch, but it needs to go in before this patch lands.
2) I presume (I didn't test it yet) that the qemu patch that adds this stuff will result into using the "json:{" protocol string in the 'backing_store' field once you do a snapshot of a gluster disk that uses this new syntax. If that happens the VM will not be able to start in libvirt any more, as libvirt does not have a 'json:{' protocol parser to parse the backing store. We tried to take a disk-only external snapshot with libvirtd running with this patch and I was unable to see the "json:{..." format for backing file.
I think we understand what you are saying, but we are unable to reproduce the problem ( of having the json way in backing file). This is what we did ....
* Domain HFM running with CentOs7.qcow2 as a network disk type w/ protocol=gluster
0 :) dhcp1-86 ~/deepakcs $ virsh list --all Id Name State ---------------------------------------------------- 4 HFM running
* Create a qcow2 file for use with external snapshot
qemu-img create -f qcow2 -b gluster://10.70.1.86/sample/CentOs7.qcow2 gluster://10.70.1.86/sample/newsnap.qcow2 If you pre-create backing files to use with --reuse-external, you have to use the json syntax here to express the backing file.
We tried that, but qemu-img says invalid protocol/format when given a json:"{...}" string in the above cmd, for backing and new file names. Qemu being used is built from source using the qemu patch thats referenced with the link below, that has json support for gluster network storage.
* Create a domainsnapshot XML (snap.xml)
<domainsnapshot> <disks> <disk name="vda" snapshot="external" type="network"> <source protocol="gluster" name="sample/newsnap.qcow2"> <host name="10.70.1.86" port="24007"/> <host name="10.70.1.88" port="24007"/> <host name="10.70.1.87" port="24007"/> </source> </disk> </disks> </domainsnapshot> You are doing it the other way around. I was speaking of doing a VM started with a multi-server gluster volume and then doing a snapshot wherever you like. This should result in a snapshot that has json definition of the backing file.
Not clear what you mean by 'other way around' ? A VM was created (HFM seen above) using libvirt with this patch. The HFM's domain xml disk element is of type network & has 3 host's (hence multi-server) entries. Also not clear on what you mean by 'wherever you like' :) Can you please elaborate or provide the high level virsh steps that we need to use ? We are using virsh which is again built as part of the libvirt src build with this patch
* Run virsh snap cmd (error in bold) [Note: using HTML mails on technical lists is not a good idea.]
Sorry again, this mail should be in plain text
0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: missing storage backend for network files using gluster protocol* Erm, how are you attempting to test this if you don't even have libvirt with gluster support installed?
Hmm, what makes you think we don't have libvirt with gluster support installed ? libvirtd is running from source with this patch included. Are we missing anything ?
* Run virsh snap cmd w/ domain is off (error in bold)
0 :) dhcp1-86 ~/deepakcs $ virsh list --all Id Name State ---------------------------------------------------- - HFM shut off
0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: external inactive snapshots are not supported on 'network' disks using 'gluster' protocol* This is expected, inactive external snapshots have only a very limited set of supported configurations. Networked storage is not there.
So in order to accept this patch, the helpers in src/util/virstoragefile.c (virStorageSourceParseBackingURI) will need to be made modular so that they can accept a driver specific callback, that will then parse the 'json:{' source definitions. (We can't just hardcode it there, since src/util "should" not contain any qemu specific code.) Questions: 1) Why does snapshot doesn't work, is it bcos the support for json is not present in the backup parse code ? The error msg seen above, isn't clear Well the error message says that you have libvirt without gluster support and thus it refuses to do a snapshot on a gluster disk.
2) IIUC, you mean that the backing file will be in the json format something like ... backing file: json"{...}" in `qemu-img info --backing-chain` output ?
Exactly.
How to create such a scenario, so that we can "see" the problem happening and hence debug/fix it :)
Start a VM with the multi-host gluster support (as of by this patch) and create an external snapshot even on local disk, no need to go to gluster again.
HFM is running w/ libvirtd and qemu built from sources with the respective patches included Can you please elaborate on "even on local disk" ? thanx, deepak

On Tue, Oct 06, 2015 at 10:38:23 +0530, Deepak C Shetty wrote:
On 10/05/2015 07:33 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 18:59:54 +0530, Deepak C Shetty wrote: [Note: Formatting of this mail is broken. Please configure your mail client to properly flow text in the plaintext mode.]
Sorry about that. I configured plain text format for libvir-list@redhat.com domain in my thunderbird client and selected plain text as delivery format for this specific mail.
Looks better now, but you really should use plain text for any technical stuff.
On 10/05/2015 04:31 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
[snip]
* Create a qcow2 file for use with external snapshot
qemu-img create -f qcow2 -b gluster://10.70.1.86/sample/CentOs7.qcow2 gluster://10.70.1.86/sample/newsnap.qcow2
If you pre-create backing files to use with --reuse-external, you have to use the json syntax here to express the backing file.
We tried that, but qemu-img says invalid protocol/format when given a json:"{...}" string in the above cmd, for backing and new file names.
Qemu being used is built from source using the qemu patch thats referenced with the link below, that has json support for gluster network storage.
Well, that probably means that the QEMU patch isn't entirely right in that case.
* Create a domainsnapshot XML (snap.xml)
<domainsnapshot> <disks> <disk name="vda" snapshot="external" type="network"> <source protocol="gluster" name="sample/newsnap.qcow2"> <host name="10.70.1.86" port="24007"/> <host name="10.70.1.88" port="24007"/> <host name="10.70.1.87" port="24007"/> </source> </disk> </disks> </domainsnapshot> You are doing it the other way around. I was speaking of doing a VM started with a multi-server gluster volume and then doing a snapshot wherever you like. This should result in a snapshot that has json definition of the backing file.
Not clear what you mean by 'other way around' ?
A VM was created (HFM seen above) using libvirt with this patch. The HFM's domain xml disk element is of type network & has 3 host's (hence multi-server) entries.
Also not clear on what you mean by 'whereeve u like' :) Can you pls elaborate or provide the high level virsh steps that we need to use ?
[1] So, as you've stated above you already have a VM that is using a gluster disk, so if you use that one and: virsh snapshot-create-as VMNAME --disk-only --diskspec vda,snapshot=external,file=/tmp/snapshot virsh shutdown or virsh destroy VMNAME virsh start VMNAME So this basically creates a local image file that has a multi-host gluster volume as backing. libvirt needs to be able to successfuly start such VM, since we'd create them. Please also see [2]. Note that the above step modifies the configuration of the VM, so you might want to back it up prior to the steps above.
We are using virsh which is again built as part of the libvirt src build with this patch
This doesn't matter, virsh just forwards everything to the daemon. The daemon has to have the right support.
* Run virsh snap cmd (error in bold) [Note: using HTML mails on technical lists is not a good idea.]
Sorry again, this mail should be in plain text
0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: missing storage backend for network files using gluster protocol* Erm, how are you attempting to test this if you don't even have libvirt with gluster support installed?
Hmm, what makes you think we don't have libvirt with gluster support installed ? libvirtd is running from source with this patch included.
So your configure options or build environment is wrong then. The hint is the error message you've posted. error: internal error: missing storage backend for network files using gluster protocol That is a generic message from the storage driver that is used in case when the appropriate backend is not available. As I've pointed out, it's the part of the storage driver that assists the qemu driver in regular operations like reading the image header or setting permissions on image files, not the actual pool/volume storage driver. [2] Additionally this might be the reason you are not seeing any failures right now. The code that uses the storage backend queries whether the storage backend is actually supported before attempting to use it, so please make sure that your daemon is properly built with gluster support.
Are we missing anything ?
Probably gluster support in the storage driver.
* Run virsh snap cmd w/ domain is off (error in bold)
[snip]
How to create such a scenario, so that we can "see" the problem happening and hence debug/fix it :)
Start a VM with the multi-host gluster support (as of by this patch) and create an external snapshot even on local disk, no need to go to gluster again. HFM is running w/ libvirtd and qemu built from sources with the respective patches included
Can you pls elaborate on "even on local disk" ?
See [1]. Peter

On 10/06/2015 02:53 PM, Peter Krempa wrote:
On Tue, Oct 06, 2015 at 10:38:23 +0530, Deepak C Shetty wrote:
On 10/05/2015 07:33 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 18:59:54 +0530, Deepak C Shetty wrote: [Note: Formatting of this mail is broken. Please configure your mail client to properly flow text in the plaintext mode.] Sorry about that. I configured plain text format for libvir-list@redhat.com domain in my thunderbird client and selected plain text as delivery format for this specificmail. Looks better now, but you really should use plain text for any technical stuff.
Yup, willl do.
On 10/05/2015 04:31 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote: [snip]
* Create a qcow2 file for use with external snapshot
qemu-img create -f qcow2 -b gluster://10.70.1.86/sample/CentOs7.qcow2 gluster://10.70.1.86/sample/newsnap.qcow2
If you pre-create backing files to use with --reuse-external, you have to use the json syntax here to express the backing file. We tried that, but qemu-img says invalid protocol/format when given a json:"{...}" string in the above cmd, for backing and new file names.
Qemu being used is built from source using the qemu patch thats referenced with the link below, that has json support for gluster network storage. Well, that probably means that the QEMU patch isn't entirely right in that case.
We just figured .... 1) qemu-img create -f qcow2 -b json:"{...}" gluster://hostname/volname/snapfile.qcow2 - works 2) qemu-img create -f qcow2 -b json:"{.../base.img}" json:"{.../snapfile.qcow2}" - fails saying json: protocol unsupported This is probably because the qemu-img utility doesn't have support for json: syntax, and IIRC in the qemu thread it was said that we don't need to add this to the qemu-img utility as it adds complexity So using #1 we are able to pre-create snapfiles that have backing-file: json string
* Create a domainsnapshot XML (snap.xml)
<domainsnapshot> <disks> <disk name="vda" snapshot="external" type="network"> <source protocol="gluster" name="sample/newsnap.qcow2"> <host name="10.70.1.86" port="24007"/> <host name="10.70.1.88" port="24007"/> <host name="10.70.1.87" port="24007"/> </source> </disk> </disks> </domainsnapshot> You are doing it the other way around. I was speaking of doing a VM started with a multi-server gluster volume and then doing a snapshot wherever you like. This should result in a snapshot that has json definition of the backing file. Not clear what you mean by 'other way around' ?
A VM was created (HFM seen above) using libvirt with this patch. The HFM's domain xml disk element is of type network & has 3 host's (hence multi-server) entries.
Also not clear on what you mean by 'wherever you like' :) Can you pls elaborate or provide the high level virsh steps that we need to use ? [1]
So, as you've stated above you already have a VM that is using a gluster disk, so if you use that one and:
virsh snapshot-create-as VMNAME --disk-only --diskspec vda,snapshot=external,file=/tmp/snapshot
virsh shutdown or virsh destroy VMNAME
virsh start VMNAME
So this basically creates a local image file that has a multi-host gluster volume as backing. libvirt needs to be able to successfully start such a VM, since we'd create them. Please also see [2]. Note that the above step modifies the configuration of the VM, so you might want to back it up prior to the steps above.
We are using virsh which is again built as part of the libvirt src build with this patch This doesn't matter, virsh just forwards everything to the daemon. The daemon has to have the right support.
* Run virsh snap cmd (error in bold) [Note: using HTML mails on technical lists is not a good idea.] Sorry again, this mail should be in plain text
0 :) dhcp1-86 ~/deepakcs $ virsh snapshot-create HFM --xmlfile ./snap.xml --disk-only --reuse-external *error: internal error: missing storage backend for network files using gluster protocol* Erm, how are you attempting to test this if you don't even have libvirt with gluster support installed? Hmm, what makes u think we don't have libvirt with gluster support installed ? libvirtd is running from source with this patch included. So your configure options or build environment is wrong then. The hint is the error message you've posted.
error: internal error: missing storage backend for network files using gluster protocol
We configured it correctly now --with-gluster and --with-storage-gluster
That is a generic message from the storage driver that is used in case when the appropriate backend is not available.
As I've pointed out, it's the part of the storage driver that assists the qemu driver in regular operations like reading the image header or setting permissions on image files, not the actual pool/volume storage driver.
So just to understand this correctly, the storage driver is called as part of parsing the backing file and yes we do see that it fails when the backing file is of json: syntax but works when it's URI syntax. This also means that the snapshot operation (of having the VM switch to the new active file) is done on the qemu side and when it returns back to libvirt, libvirt is unable to parse as the backing file is json format, so fixing the URI parser to also support parsing JSON needs to be done. What happens when we don't use --reuse-external? IIUC the new file is created by libvirt, so which part of libvirt needs to be fixed to ensure that libvirt creates the file using json: syntax and not URI syntax ? The above qemu-img snip shows that json is only supported as part of the -b option, so will that be an inhibitor for libvirt when creating new files ?
[2]
Additionally this might be the reason you are not seeing any failures right now. The code that uses the storage backend queries whether the storage backend is actually supported before attempting to use it, so please make sure that your daemon is properly built with gluster support.
pkalever is now working with libvirtd having gluster support We will know more as we make more experiments... the whole gamut of network disk support in qemu/libvirt is confusing esp with the addn of json and URI syntax both :) So pls bear with our questions above :) thanx, deepak

On Tue, Oct 06, 2015 at 15:35:05 +0530, Deepak C Shetty wrote:
On 10/06/2015 02:53 PM, Peter Krempa wrote:
On Tue, Oct 06, 2015 at 10:38:23 +0530, Deepak C Shetty wrote:
On 10/05/2015 07:33 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 18:59:54 +0530, Deepak C Shetty wrote:
On 10/05/2015 04:31 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote:
[...]
We just figured ....
1) qemu-img create -f qcow2 -b json:"{...}" gluster://hostname/volname/snapfile.qcow2 - works
Okay, you can use it as the backing store string ...
2) qemu-img create -f qcow2 -b json:"{.../base.img}" json:"{.../snapfile.qcow2}" - fails saying json: protocol unsupported
... but not as the image itself. That is workable, since you could emulate the behavior by re-trying the command with different single-server URIs.
This is probably bcos qemu-img utility doesn't have support for json: syntax and IIRC in the qemu thread it was said that we don't need to add this to the qemu-img utility as it adds complexity
So using #1 we are able to pre-create snapfiles that have backing-file: json string
[...]
gluster protocol* Erm, how are you attempting to test this if you don't even have libvirt with gluster support installed? Hmm, what makes u think we don't have libvirt with gluster support installed ? libvirtd is running from source with this patch included. So your configure options or build environment is wrong then. The hint is the error message you've posted.
error: internal error: missing storage backend for network files using gluster protocol
We configured it correctly now --with-gluster and --with-storage-gluster
That is a generic message from the storage driver that is used in case when the appropriate backend is not available.
As I've pointed out, it's the part of the storage driver that assists the qemu driver in regular operations like reading the image header or setting permissions on image files, not the actual pool/volume storage driver.
So just to understand this correctly, the storage driver is called as part of parsing the backing file and yes we do see that it fails when backing file is of json: syntax but works when its URI syntax.
Well, this is the thing I've pointed out in the original review.
This also means that the snapshot operation (of having the VM switch to the new active file) is done on qemu side and when it returns back to libvirt, libvirt is unable to parse as the backing file is json format, so fixing the parse URI to support parseJSON needs to be done.
Exactly, that's why I've requested it.
What happens when we don't use --reuse-external. IIUC the newfile is
If you don't use --reuse-external, then the file is pre-created by libvirt and the image metadata (including the backing store string) are filled by the qemu process once we start the "blockdev-snapshot-sync" operation in qemu.
created by libvirt so which part of libvirt needs to be fixed to ensure that libvirt creates the file using json: syntax and not URI syntax ? The above
No, this happens in qemu. Libvirt needs to be able to process that file on next start of the VM.
qemu-img snip shows that json is only supported as part of the -b option, so will that be an inhibitor for libvirt when creating new files ?
Not at this point, but we might want to use it in the future in that way.
[2]
Additionally this might be the reason you are not seeing any failures right now. The code that uses the storage backend queries whether the storage backend is actually supported before attempting to use it, so please make sure that your daemon is properly built with gluster support.
pkalever is now working with libvirtd having gluster support
(Shouldn't that be a pre-requisite for posting a patch like this?)
We will know more as we make more experiments... the whole gamut of network disk support in qemu/libvirt is confusing esp with the addn of json and URI syntax both :) So pls bear with our questions above :)
Peter

On 10/06/2015 06:12 PM, Peter Krempa wrote:
On Tue, Oct 06, 2015 at 15:35:05 +0530, Deepak C Shetty wrote:
On 10/06/2015 02:53 PM, Peter Krempa wrote:
On Tue, Oct 06, 2015 at 10:38:23 +0530, Deepak C Shetty wrote:
On 10/05/2015 07:33 PM, Peter Krempa wrote:
On Mon, Oct 05, 2015 at 18:59:54 +0530, Deepak C Shetty wrote:
On 10/05/2015 04:31 PM, Peter Krempa wrote: > On Mon, Oct 05, 2015 at 15:30:42 +0530, Prasanna Kumar Kalever wrote: [...]
We just figured ....
1) qemu-img create -f qcow2 -b json:"{...}" gluster://hostname/volname/snapfile.qcow2 - works Okay, you can use it as the backing store string ...
2) qemu-img create -f qcow2 -b json:"{.../base.img}" json:"{.../snapfile.qcow2}" - fails saying json: protocol unsupported ... but not as the image itself. That is workable, since you could emulate the behavior by re-trying the command with different single-server URIs.
This is probably bcos qemu-img utility doesn't have support for json: syntax and IIRC in the qemu thread it was said that we don't need to add this to the qemu-img utility as it adds complexity
So using #1 we are able to pre-create snapfiles that have backing-file: json string [...]
gluster protocol* Erm, how are you attempting to test this if you don't even have libvirt with gluster support installed? Hmm, what makes u think we don't have libvirt with gluster support installed ? libvirtd is running from source with this patch included. So your configure options or build environment is wrong then. The hint is the error message you've posted.
error: internal error: missing storage backend for network files using gluster protocol We configured it correctly now --with-gluster and --with-storage-gluster
That is a generic message from the storage driver that is used in case when the appropriate backend is not available.
As I've pointed out, it's the part of the storage driver that assists the qemu driver in regular operations like reading the image header or setting permissions on image files, not the actual pool/volume storage driver. So just to understand this correctly, the storage driver is called as part of parsing the backing file and yes we do see that it fails when backing file is of json: syntax but works when its URI syntax. Well, this is the thing I've pointed out in the original review.
Yes and many thanks for that. I just put it there to confirm our understanding as we are slowly inching towards being on the same page with you :)
This also means that the snapshot operation (of having the VM switch to the new active file) is done on qemu side and when it returns back to libvirt, libvirt is unable to parse as the backing file is json format, so fixing the parse URI to support parseJSON needs to be done. Exactly, that's why I've requested it.
Great to be on same page with you till here :)
What happens when we don't use --reuse-external. IIUC the newfile is If you don't use --reuse-external, then the file is pre-created by libvirt and the image metadata (including the backing store string) are filled by the qemu process once we start the "blockdev-snapshot-sync" operation in qemu.
This is still not clear to me. When libvirt pre-creates the file, how does it do it ? Using URI or json syntax ? Since libvirt needs to know the way to talk with glusterfs, which is either using URI or json, so its still not clear to me how libvirt is able to pre-create the file when --reuse-external is absent.
created by libvirt so which part of libvirt needs to be fixed to ensure that libvirt creates the file using json: syntax and not URI syntax ? The above No, this happens in qemu. Libvirt needs to be able to process that file on next start of the VM.
Why on next start only ? When snap creation is successful from qemu, the backing file will be json syntax, so when the flow comes back to libvirt, it needs to parse json syntax in order to update the backingStore info in the domain XML, right ?
qemu-img snip shows that json is only supported as part of -b option only, so will that be a inhibitor for libvirt when creating new files ? Not at this point, but we might want to use it in the future in that way.
So when libvirt pre-creates the file, I thought it will use: qemu-img create -f qcow2 (either json: or URI syntax) depending on the support present in the libvirt code, right ?
[2]
Additionally this might be the reason you are not seeing any failures right now. The code that uses the storage backend queries whether the storage backend is actually supported before attempting to use it, so please make sure that your daemon is properly built with gluster support. pkalever is now working with libvirtd having gluster support (Shouldn't that be a pre-requisite for posting a patch like this?)
Yes, we had some issues with libgfapi, so libvirtd with those configure options wasn't starting, so we were working w/o them, and it worked until we needed to create a snap. Now we are fine. thanx, deepak
We will know more as we make more experiments... the whole gamut of network disk support in qemu/libvirt is confusing esp with the addn of json and URI syntax both :) So pls bear with our questions above :) Peter

On Tue, Oct 06, 2015 at 18:54:25 +0530, Deepak C Shetty wrote:
On 10/06/2015 06:12 PM, Peter Krempa wrote:
On Tue, Oct 06, 2015 at 15:35:05 +0530, Deepak C Shetty wrote:
On 10/06/2015 02:53 PM, Peter Krempa wrote:
On Tue, Oct 06, 2015 at 10:38:23 +0530, Deepak C Shetty wrote:
[...]
What happens when we don't use --reuse-external. IIUC the newfile is If you don't use --reuse-external, then the file is pre-created by libvirt and the image metadata (including the backing store string) are filled by the qemu process once we start the "blockdev-snapshot-sync" operation in qemu.
This is still not clear to me. When libvirt pre-creates the file, how does it do it ? Using URI or json syntax ? Since libvirt needs to know the way to talk with glusterfs, which is either using URI or json, so its still not clear to me how libvirt is able to pre-create the file when --reuse-external is absent.
I suggest you actually read the code if you are going to work on this. Good entry points would be: qemuDomainSnapshotCreateActiveExternal (top level snapshot function) qemuDomainSnapshotCreateDiskActive (disk snapshot sub-part) qemuDomainSnapshotCreateSingleDiskActive (single disk image sub-part) The last mentioned function is probably the most relevant one in this case. The last function calls 'virStorageFileCreate' which is another good read to start. The backend it calls is 'virStorageFileBackendGlusterCreate'. Yet another good read in this case is qemuDomainDetermineDiskChain and the call tree below it.
created by libvirt so which part of libvirt needs to be fixed so ensure that libvirt creates the file using json: syntax and not URI syntax ? The above No, this happens in qemu. Libvirt needs to be able to process that file on next start of the VM.
Why on next start only ?
Okay, not actually at start only ...
When snap creation is successfull from qemu, the backing file will be json syntax, so when the flow comes back to libvirt, it needs to parse json syntax in order to update the backingStore info in the domain XML, right ?
... snapshot creation code is actually able to manipulate the backing data tracked internally in the proper way, but the data is then overridden by the reloaded data.
qemu-img snip shows that json is only supported as part of -b option only, so will that be a inhibitor for libvirt when creating new files ? Not at this point, but we might want to use it in the future in that way.
So when libvirt pre-creates the file, I thought it will use: qemu-img create -f qcow2 (either json: or URI syntax) depending on the support present in the libvirt code, right ?
See the functions mentioned above for an answer. Peter

[trimming the cc: list] On Mon, Oct 05, 2015 at 03:30:42PM +0530, Prasanna Kumar Kalever wrote:
currently libvirt has the capability to parse only one host and convert that into URI formatted string, with the help of this patch libvirt will be able to parse multiple hosts from the domain xml and can convert that into JSON formatted string
We have a public bug for multiple gluster host support: https://bugzilla.redhat.com/show_bug.cgi?id=1247521 Including the link in the commit message is helpful for people reading it in the future. Jan
participants (5)
-
Deepak C Shetty
-
Ján Tomko
-
Kevin Wolf
-
Peter Krempa
-
Prasanna Kumar Kalever