https://bugzilla.redhat.com/show_bug.cgi?id=916061

If the running QEMU is new enough (it advertises the DUMP_COMPLETED
event), then we can add a 'detach' boolean to the dump-guest-memory
command in order to tell QEMU to run the dump in a thread. This
ensures that we don't lock out other monitor commands while the
potentially long-running memory dump completes.
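
For reference, the monitor conversation for a detached dump looks
roughly like the following (the values are illustrative, and the fd
name assumes the dump file descriptor was previously handed to QEMU,
e.g. via getfd):

  { "execute": "dump-guest-memory",
    "arguments": { "paging": false,
                   "protocol": "fd:dump",
                   "detach": true } }
  { "return": {} }
  ... the dump runs in a QEMU thread; other commands remain usable ...
  { "event": "DUMP_COMPLETED",
    "data": { "result": { "total": 1073741824,
                          "completed": 1073741824,
                          "status": "completed" } } }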

This allows the use of the new qemuDumpWaitForCompletion helper, which
waits for the event, while qemuDomainGetJobInfoDumpStats, called via
qemuDomainGetJobInfo, can query QEMU to determine how far along the
job is.
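
Purely for illustration, a client of the existing public API can now
watch the dump from a second connection while another thread or
process runs virDomainCoreDumpWithFormat() with VIR_DUMP_MEMORY_ONLY.
The sketch below assumes the dump progress is surfaced through the
mem* fields of virDomainJobInfo (build with -lvirt):

  #include <stdio.h>
  #include <unistd.h>
  #include <libvirt/libvirt.h>

  static void
  watchDump(virDomainPtr dom)
  {
      virDomainJobInfo info;

      while (virDomainGetJobInfo(dom, &info) == 0 &&
             info.type != VIR_DOMAIN_JOB_NONE) {
          /* Assumption: dump progress lands in memProcessed/memTotal */
          printf("dumped %llu of %llu bytes\n",
                 info.memProcessed, info.memTotal);
          sleep(1);
      }
  }

  int
  main(int argc, char **argv)
  {
      virConnectPtr conn = NULL;
      virDomainPtr dom = NULL;

      if (argc < 2 || !(conn = virConnectOpenReadOnly(NULL)) ||
          !(dom = virDomainLookupByName(conn, argv[1])))
          return 1;

      watchDump(dom);
      virDomainFree(dom);
      virConnectClose(conn);
      return 0;
  }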

Now that we have a true async job, we only set the dump_memory_only
flag when @detach=false; otherwise, we mark the job's stats type as a
memory dump, which lets the job info code on the other end determine
what to copy.
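
The job info side is not part of this patch; purely as a hypothetical
sketch (the signature of qemuDomainGetJobInfoDumpStats and the
surrounding switch are assumptions, not the actual implementation),
the consumer can branch on the stats type along these lines:

  /* Hypothetical sketch only -- the real consumer is added by the
   * patch introducing qemuDomainGetJobInfoDumpStats(). */
  switch (jobInfo->statsType) {
  case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
      /* Pull the dump progress (e.g. via query-dump) into jobInfo */
      if (qemuDomainGetJobInfoDumpStats(driver, vm, jobInfo) < 0)
          return -1;
      break;
  default:
      /* Migration/save stats keep using the existing path */
      break;
  }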

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
src/qemu/qemu_driver.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 60 insertions(+), 5 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 00a9900f9..082b663a0 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3760,6 +3760,49 @@ qemuDomainManagedSaveRemove(virDomainPtr dom, unsigned int flags)
}
+/**
+ * qemuDumpWaitForCompletion:
+ * @vm: domain object
+ *
+ * If the query dump capability exists, then it's possible to start a
+ * guest memory dump operation using a thread via a 'detach' qualifier
+ * to the dump guest memory command. This allows checking asynchronously
+ * whether the dump has completed.
+ *
+ * Returns 0 on success, -1 on failure
+ */
+static int
+qemuDumpWaitForCompletion(virDomainObjPtr vm)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ int ret = -1;
+
+ VIR_DEBUG("Waiting for dump completion");
+ while (!priv->job.dumpCompleted && !priv->job.abortJob) {
+ if (virDomainObjWait(vm) < 0)
+ return -1;
+ }
+
+ if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
+ if (priv->job.error)
+ virReportError(VIR_ERR_OPERATION_FAILED,
+ _("memory-only dump failed: %s"),
+ priv->job.error);
+ else
+ virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("memory-only dump failed for unknown reason"));
+
+ goto cleanup;
+ }
+ qemuDomainJobInfoUpdateTime(priv->job.current);
+
+ ret = 0;
+
+ cleanup:
+ return ret;
+}
+
+
static int
qemuDumpToFd(virQEMUDriverPtr driver,
virDomainObjPtr vm,
@@ -3768,6 +3811,7 @@ qemuDumpToFd(virQEMUDriverPtr driver,
const char *dumpformat)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
+ bool detach = false;
int ret = -1;
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DUMP_GUEST_MEMORY)) {
@@ -3776,11 +3820,17 @@ qemuDumpToFd(virQEMUDriverPtr driver,
return -1;
}
+ detach = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DUMP_COMPLETED);
+
if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
return -1;
- VIR_FREE(priv->job.current);
- priv->job.dump_memory_only = true;
+ if (detach) {
+ priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
+ } else {
+ VIR_FREE(priv->job.current);
+ priv->job.dump_memory_only = true;
+ }
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
@@ -3794,15 +3844,20 @@ qemuDumpToFd(virQEMUDriverPtr driver,
"for this QEMU binary"),
dumpformat);
ret = -1;
+ ignore_value(qemuDomainObjExitMonitor(driver, vm));
goto cleanup;
}
}
- ret = qemuMonitorDumpToFd(priv->mon, fd, dumpformat, false);
+ ret = qemuMonitorDumpToFd(priv->mon, fd, dumpformat, detach);
- cleanup:
- ignore_value(qemuDomainObjExitMonitor(driver, vm));
+ if ((qemuDomainObjExitMonitor(driver, vm) < 0) || ret < 0)
+ goto cleanup;
+ if (detach)
+ ret = qemuDumpWaitForCompletion(vm);
+
+ cleanup:
return ret;
}
--
2.13.6