[libvirt] [PATCH] storage: Don't update pool available/allocation if vol-create-as fails

https://bugzilla.redhat.com/show_bug.cgi?id=1024159 If adding a volume to a storage pool fails during the CreateXML or CreateXMLFrom API's, we don't want to adjust the available and allocation values for the storage pool during storageVolDelete since we haven't adjusted the values for the create. Refactor storageVolDelete() a bit to create a storageVolDeleteInternal() which will handle the primary deletion activities. Add a parameter updateMeta which will signify whether to update the values or not. Adjust the calls from CreateXML and CreateXMLFrom to directly call the DeleteInternal with the pool lock held. This does bypass the call to virStorageVolDeleteEnsureACL(). Signed-off-by: John Ferlan <jferlan@redhat.com> --- I did try to make storageVolDelete() just be a shim to storageVolDeleteInternal(), but ran afoul of check-aclrules.pl since the EnsureAcl wasn't in storageVolDelete(). src/storage/storage_driver.c | 83 +++++++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 31 deletions(-) diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c index 2f98f78..6b14b1b 100644 --- a/src/storage/storage_driver.c +++ b/src/storage/storage_driver.c @@ -1553,6 +1553,51 @@ storageVolLookupByPath(virConnectPtr conn, static int +storageVolDeleteInternal(virStorageVolPtr obj, + virStorageBackendPtr backend, + virStoragePoolObjPtr pool, + virStorageVolDefPtr vol, + unsigned int flags, + bool updateMeta) +{ + size_t i; + int ret = -1; + + if (!backend->deleteVol) { + virReportError(VIR_ERR_NO_SUPPORT, + "%s", _("storage pool does not support vol deletion")); + + goto cleanup; + } + + if (backend->deleteVol(obj->conn, pool, vol, flags) < 0) + goto cleanup; + + /* Update pool metadata - don't update meta data from error paths + * in this module since the allocation/available weren't adjusted yet + */ + if (updateMeta) { + pool->def->allocation -= vol->target.allocation; + pool->def->available += vol->target.allocation; + } + + for (i = 0; i < 
pool->volumes.count; i++) { + if (pool->volumes.objs[i] == vol) { + VIR_INFO("Deleting volume '%s' from storage pool '%s'", + vol->name, pool->def->name); + virStorageVolDefFree(vol); + + VIR_DELETE_ELEMENT(pool->volumes.objs, i, pool->volumes.count); + break; + } + } + ret = 0; + + cleanup: + return ret; +} + +static int storageVolDelete(virStorageVolPtr obj, unsigned int flags) { @@ -1560,7 +1605,6 @@ storageVolDelete(virStorageVolPtr obj, virStoragePoolObjPtr pool; virStorageBackendPtr backend; virStorageVolDefPtr vol = NULL; - size_t i; int ret = -1; storageDriverLock(driver); @@ -1602,30 +1646,9 @@ storageVolDelete(virStorageVolPtr obj, goto cleanup; } - if (!backend->deleteVol) { - virReportError(VIR_ERR_NO_SUPPORT, - "%s", _("storage pool does not support vol deletion")); - - goto cleanup; - } - - if (backend->deleteVol(obj->conn, pool, vol, flags) < 0) + if (storageVolDeleteInternal(obj, backend, pool, vol, flags, true) < 0) goto cleanup; - /* Update pool metadata */ - pool->def->allocation -= vol->target.allocation; - pool->def->available += vol->target.allocation; - - for (i = 0; i < pool->volumes.count; i++) { - if (pool->volumes.objs[i] == vol) { - VIR_INFO("Deleting volume '%s' from storage pool '%s'", - vol->name, pool->def->name); - virStorageVolDefFree(vol); - - VIR_DELETE_ELEMENT(pool->volumes.objs, i, pool->volumes.count); - break; - } - } ret = 0; cleanup: @@ -1634,7 +1657,6 @@ storageVolDelete(virStorageVolPtr obj, return ret; } - static virStorageVolPtr storageVolCreateXML(virStoragePoolPtr obj, const char *xmldesc, @@ -1738,9 +1760,9 @@ storageVolCreateXML(virStoragePoolPtr obj, voldef = NULL; if (buildret < 0) { - virStoragePoolObjUnlock(pool); - storageVolDelete(volobj, 0); - pool = NULL; + storageVolDeleteInternal(volobj, backend, pool, buildvoldef, + 0, false); + buildvoldef = NULL; goto cleanup; } @@ -1908,7 +1930,6 @@ storageVolCreateXMLFrom(virStoragePoolPtr obj, origvol->building = 0; newvol->building = 0; allocation = 
newvol->target.allocation; - newvol = NULL; pool->asyncjobs--; if (origpool) { @@ -1918,11 +1939,11 @@ storageVolCreateXMLFrom(virStoragePoolPtr obj, } if (buildret < 0) { - virStoragePoolObjUnlock(pool); - storageVolDelete(volobj, 0); - pool = NULL; + storageVolDeleteInternal(volobj, backend, pool, newvol, 0, false); + newvol = NULL; goto cleanup; } + newvol = NULL; /* Updating pool metadata */ pool->def->allocation += allocation; -- 1.9.0

On 04/08/2014 05:38 PM, John Ferlan wrote:
https://bugzilla.redhat.com/show_bug.cgi?id=1024159
If adding a volume to a storage pool fails during the CreateXML or CreateXMLFrom API's, we don't want to adjust the available and allocation values for the storage pool during storageVolDelete since we haven't adjusted the values for the create.
Refactor storageVolDelete() a bit to create a storageVolDeleteInternal() which will handle the primary deletion activities. Add a parameter updateMeta which will signify whether to update the values or not.
Adjust the calls from CreateXML and CreateXMLFrom to directly call the DeleteInternal with the pool lock held. This does bypass the call to virStorageVolDeleteEnsureACL().
Signed-off-by: John Ferlan <jferlan@redhat.com> --- I did try to make storageVolDelete() just be a shim to storageVolDeleteInternal(), but ran afoul of check-aclrules.pl since the EnsureAcl wasn't in storageVolDelete().
Yeah, the EnsureAcl has to be in the function matching the public API, but the bulk of the work can indeed be in the internal helper function.
@@ -1634,7 +1657,6 @@ storageVolDelete(virStorageVolPtr obj, return ret; }
- static virStorageVolPtr
Spurious whitespace change. ACK with that fix. -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

On 04/08/2014 09:44 PM, Eric Blake wrote:
Yeah, the EnsureAcl has to be in the function matching the public API, but the bulk of the work can indeed be in the internal helper function.
@@ -1634,7 +1657,6 @@ storageVolDelete(virStorageVolPtr obj, return ret; }
- static virStorageVolPtr
Spurious whitespace change.
ACK with that fix.
Made the change and pushed - thanks John
participants (2)
-
Eric Blake
-
John Ferlan