Most code paths prevent starting a blockjob if we already have one, but
the job registering function does not perform this check. While this
isn't a problem for regular cases, we had a bad test case where we
registered two jobs for a single disk, which leaked one of the jobs.
Prevent this in the registering function until we allow having multiple
jobs per disk.
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
src/qemu/qemu_blockjob.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index a991309ee7..80d0269128 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -143,6 +143,12 @@ qemuBlockJobRegister(qemuBlockJobDataPtr job,
{
qemuDomainObjPrivatePtr priv = vm->privateData;
+ if (disk && QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("disk '%s' has a blockjob assigned"),
+                       disk->dst);
+ return -1;
+ }
+
if (virHashAddEntry(priv->blockjobs, job->name, virObjectRef(job)) < 0) {
virObjectUnref(job);
return -1;
--
2.21.0