This is the bare minimum to end a copy job (of course, until a
later patch adds the ability to start a copy job, this patch
doesn't do much in isolation; I've just split the patches to
ease the review).
This patch intentionally avoids SELinux, lock manager, and audit
actions. Also, if libvirtd restarts at the exact moment a
'drive-reopen' is in flight, the proposed proper way to detect the
outcome of that 'drive-reopen' would be to first pass in a witness
fd with 'getfd', then, at libvirtd restart, probe whether that file
is still empty. This patch is enough to test the common case of
success when used correctly, while deferring the subtleties of
proper cleanup after worst-case errors to later patches.
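As a minimal sketch of the restart-time half of that idea (the helper
name and the exact meaning of an empty witness file are assumptions
on my part; only the 'hand qemu an fd via getfd, then check whether
the file is still empty after a restart' part comes from the proposal
above):

    #include <sys/stat.h>

    /* Hypothetical helper: at libvirtd restart, report whether the
     * witness file previously passed to qemu via 'getfd' is still
     * empty.  Returns 1 if empty, 0 if not, -1 on stat failure.  */
    static int
    qemuBlockPivotWitnessIsEmpty(const char *witness_path)
    {
        struct stat sb;

        if (stat(witness_path, &sb) < 0)
            return -1;
        return sb.st_size == 0 ? 1 : 0;
    }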
Once a mirror job has been started, cancelling the job safely reverts
to the source disk, regardless of whether the destination is in
phase 1 (streaming, in which case the destination is worthless) or
phase 2 (mirroring, in which case the destination is synced up to
the source as of the time of the cancel). Our existing code handles
either phase just fine, other than needing some bookkeeping cleanup;
that alone is enough to implement live block copy, even if qemu 1.1
lacks 'drive-reopen'.
Pivoting the job requires the use of the new 'drive-reopen' command.
Here, failure of the command is potentially catastrophic to the
domain, since the initial qemu implementation rips out the old disk
before attempting to open the new one; qemu will attempt a recovery
path of retrying the reopen on the original source, but if that also
fails, the domain is hosed, with nothing libvirt can do about it.
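For reference, a pivot request from an application using the new flag
boils down to a call like the following sketch (the connection URI,
domain name, and disk target are made up, and error handling is
trimmed; a real caller should poll virDomainGetBlockJobInfo until
cur == end before attempting the pivot):

    #include <libvirt/libvirt.h>

    /* Sketch only: assumes a copy job on "vda" of domain "demo" has
     * already reached the mirroring phase.  */
    int
    pivot_vda(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = virDomainLookupByName(conn, "demo");
        int ret = virDomainBlockJobAbort(dom, "vda",
                                         VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT);

        virDomainFree(dom);
        virConnectClose(conn);
        return ret;
    }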
Ideas for future enhancements via new flags:
If qemu 1.2 ever adds 'drive-reopen' inside 'transaction', then the
problem will no longer exist (a transaction promises not to close
the old file until after the new file is proven to work), at which
point we would add a VIR_DOMAIN_REBASE_COPY_ATOMIC flag that fails
up front if we detect an older qemu with the risky 'drive-reopen'. We
may also want to add a flag that fails up front if there is no
reopen support at all, rather than waiting until the entire copy
is done only to find out that pivot always fails.
Interesting side note: while snapshot-create --disk-only creates a
copy of the disk at a point in time by moving the domain onto a
new file (the copy is the file now in the just-extended backing
chain), blockjob --abort of a copy job creates a copy of the disk
while keeping the domain on the original file. There is room to
improve the snapshot code to exploit block copy over multiple
disks, all at one point in time. And if 'block-job-cancel' were
made part of 'transaction', you could copy multiple disks at the
same point in time without pausing the domain. This also implies
we may want to add a --quiesce flag to virDomainBlockJobAbort, so
that when breaking a mirror (whether by cancel or pivot), the side
of the mirror that we are abandoning is at least in a stable state
with regard to guest I/O.
* src/qemu/qemu_driver.c (qemuDomainBlockJobAbort): Accept new flag.
(qemuDomainBlockPivot): New helper function.
(qemuDomainBlockJobImpl): Implement it.
---
v6: add probe for drive-reopen at pivot time, so that it is
possible to support copy but not pivot with just drive-mirror
src/qemu/qemu_driver.c | 112 +++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 111 insertions(+), 1 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e1584c6..e562844 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -11640,6 +11640,86 @@ cleanup:
return ret;
}
+/* Called while holding the VM job lock, to implement a block job
+ * abort with pivot; this updates the VM definition as appropriate, on
+ * either success or failure (although there are some forms of
+ * catastrophic failure that will leave the VM unusable). */
+static int
+qemuDomainBlockPivot(struct qemud_driver *driver, virDomainObjPtr vm,
+                     const char *device, virDomainDiskDefPtr disk)
+{
+    int ret = -1;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virDomainBlockJobInfo info;
+
+    /* Probe the status, if needed. */
+    if (!disk->mirroring) {
+        qemuDomainObjEnterMonitorWithDriver(driver, vm);
+        ret = qemuMonitorBlockJob(priv->mon, device, NULL, 0, &info,
+                                  BLOCK_JOB_INFO, true);
+        qemuDomainObjExitMonitorWithDriver(driver, vm);
+        if (ret < 0)
+            goto cleanup;
+        if (!virDomainObjIsActive(vm)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                            _("domain is not running"));
+            goto cleanup;
+        }
+        if (ret == 1 && info.cur == info.end &&
+            info.type == VIR_DOMAIN_BLOCK_JOB_TYPE_COPY)
+            disk->mirroring = true;
+    }
+
+    if (!disk->mirroring) {
+        qemuReportError(VIR_ERR_BLOCK_COPY_ACTIVE,
+                        _("disk '%s' not ready for pivot yet"),
+                        disk->dst);
+        goto cleanup;
+    }
+
+    /* Attempt the pivot. */
+    qemuDomainObjEnterMonitorWithDriver(driver, vm);
+    ret = qemuMonitorDriveReopen(priv->mon, device, disk->mirror,
+                                 disk->mirrorFormat);
+    qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+    /* XXX Until qemu adds support for 'drive-reopen' inside
+     * 'transaction', we have the remote risk of a catastrophic
+     * failure, where the drive-reopen fails but can't recover by
+     * reopening the source. Not much we can do about it. */
+
+    if (ret == 0) {
+        /* XXX We want to revoke security labels and disk lease, as
+         * well as audit that revocation, before dropping the original
+         * source. But it gets tricky if both source and mirror share
+         * common backing files (we want to only revoke the non-shared
+         * portion of the chain), which is made more difficult by the
+         * fact that we aren't tracking the full chain ourselves; so
+         * for now, we leak the access to the original. */
+        VIR_FREE(disk->src);
+        VIR_FREE(disk->driverType);
+        disk->src = disk->mirror;
+        disk->driverType = disk->mirrorFormat;
+        disk->mirror = NULL;
+        disk->mirrorFormat = NULL;
+        disk->mirroring = false;
+    } else {
+        /* On failure, qemu abandons the mirror, and attempts to
+         * revert to the source disk. Hopefully it was able to
+         * reopen things. */
+        /* XXX should we be parsing the exact qemu error, or calling
+         * 'query-block', to see what state we really got left in
+         * before killing the mirroring job? And just as in the
+         * success case, there's security labeling to worry about. */
+        VIR_FREE(disk->mirror);
+        VIR_FREE(disk->mirrorFormat);
+        disk->mirroring = false;
+    }
+
+cleanup:
+    return ret;
+}
+
static int
qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
unsigned long bandwidth, virDomainBlockJobInfoPtr info,
@@ -11689,6 +11769,20 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
disk->dst);
goto cleanup;
}
+    if (mode == BLOCK_JOB_ABORT &&
+        (flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)) {
+        if (!(async && disk->mirror)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            _("pivot of disk '%s' requires an active copy job"),
+                            disk->dst);
+            goto cleanup;
+        }
+        if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_DRIVE_REOPEN)) {
+            qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                            _("pivot not supported with this QEMU binary"));
+            goto cleanup;
+        }
+    }
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
@@ -11699,6 +11793,12 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
goto endjob;
}
+    if (disk->mirror && mode == BLOCK_JOB_ABORT &&
+        (flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)) {
+        ret = qemuDomainBlockPivot(driver, vm, device, disk);
+        goto endjob;
+    }
+
qemuDomainObjEnterMonitorWithDriver(driver, vm);
/* XXX - libvirt should really be tracking the backing file chain
* itself, and validating that base is on the chain, rather than
@@ -11718,6 +11818,15 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
        info->cur == info->end && info->type == VIR_DOMAIN_BLOCK_JOB_TYPE_COPY)
disk->mirroring = true;
+    /* A successful block job cancelation stops any mirroring. */
+    if (mode == BLOCK_JOB_ABORT && disk->mirror) {
+        /* XXX We should also revoke security labels and disk lease on
+         * the mirror, and audit that fact, before dropping things. */
+        VIR_FREE(disk->mirror);
+        VIR_FREE(disk->mirrorFormat);
+        disk->mirroring = false;
+    }
+
/* With synchronous block cancel, we must synthesize an event, and
* we silently ignore the ABORT_ASYNC flag. With asynchronous
* block cancel, the event will come from qemu, but without the
@@ -11782,7 +11891,8 @@ cleanup:
static int
qemuDomainBlockJobAbort(virDomainPtr dom, const char *path, unsigned int flags)
{
-    virCheckFlags(VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC, -1);
+    virCheckFlags(VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC |
+                  VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT, -1);
return qemuDomainBlockJobImpl(dom, path, NULL, 0, NULL, BLOCK_JOB_ABORT,
flags);
}
--
1.7.7.6