On 12/3/19 11:17 AM, Peter Krempa wrote:
This allows starting and managing the backup job.
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
---
resuming where I left off last time
+
+static int
+qemuBackupDiskPrepareDataOne(virDomainObjPtr vm,
+
+static int
+qemuBackupDiskPrepareDataOnePush(virJSONValuePtr actions,
+ struct qemuBackupDiskData *dd)
+{
+ qemuMonitorTransactionBackupSyncMode syncmode =
+ QEMU_MONITOR_TRANSACTION_BACKUP_SYNC_MODE_FULL;
+
+ if (dd->incrementalBitmap)
+ syncmode = QEMU_MONITOR_TRANSACTION_BACKUP_SYNC_MODE_INCREMENTAL;
Looks correct for both forms of push mode backup.
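For readers following along, the member of the 'transaction' command this
should generate for the incremental case is roughly the following (node,
bitmap and job names invented for illustration; a full backup is the same
with "sync": "full" and no "bitmap" member):

    {"type": "blockdev-backup",
     "data": {"device": "drive-vda-format", "target": "backup-vda-target",
              "sync": "incremental", "bitmap": "backup-vda",
              "job-id": "backup-vda"}}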
+
+static int
+qemuBackupDiskPrepareDataOnePull(virJSONValuePtr actions,
+ struct qemuBackupDiskData *dd)
+{
+ if (qemuMonitorTransactionBackup(actions,
+ dd->domdisk->src->nodeformat,
+ dd->blockjob->name,
+ dd->store->nodeformat,
+ NULL,
+ QEMU_MONITOR_TRANSACTION_BACKUP_SYNC_MODE_NONE) < 0)
and this is the correct mode for the blockjob used in managing pull mode
backup.
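That is, roughly this action, with the scratch image as the target (names
again invented):

    {"type": "blockdev-backup",
     "data": {"device": "drive-vda-format", "target": "backup-vda-scratch",
              "sync": "none", "job-id": "backup-vda"}}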
+
+static int
+qemuBackupDiskPrepareOneStorage(virDomainObjPtr vm,
+ virHashTablePtr blockNamedNodeData,
+ struct qemuBackupDiskData *dd,
+ bool reuse_external)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ int rc;
+
+ if (!reuse_external &&
+ dd->store->type == VIR_STORAGE_TYPE_FILE &&
+ virStorageFileSupportsCreate(dd->store)) {
+
+ if (virFileExists(dd->store->path)) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("store '%s' for backup of '%s'
existst"),
exists
+ dd->store->path,
dd->domdisk->dst);
+ return -1;
+ }
+
+ if (qemuDomainStorageFileInit(priv->driver, vm, dd->store, NULL) < 0)
+ return -1;
+
+ dd->initialized = true;
+
+ if (virStorageFileCreate(dd->store) < 0) {
+ virReportSystemError(errno,
+ _("failed to create image file
'%s'"),
+ NULLSTR(dd->store->path));
+ return -1;
+ }
+
+ dd->created = true;
+ }
+
+ if (qemuDomainStorageSourceAccessAllow(priv->driver, vm, dd->store, false,
+ true) < 0)
+ return -1;
+
+ dd->labelled = true;
+
+ if (!reuse_external) {
+ if (qemuBlockStorageSourceCreateDetectSize(blockNamedNodeData,
+ dd->store, dd->domdisk->src) < 0)
+ return -1;
+
+ if (qemuBlockStorageSourceCreate(vm, dd->store, NULL, NULL,
+ dd->crdata->srcdata[0],
+ QEMU_ASYNC_JOB_BACKUP) < 0)
+ return -1;
+ } else {
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ return -1;
+
+ rc = qemuBlockStorageSourceAttachApply(priv->mon, dd->crdata->srcdata[0]);
+
+ if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
+ return -1;
Offlist, we were wondering whether this should blindly trust whatever is
already in the external file, or blindly pass backing:null. It may depend
on the mode: push mode should probably trust the image, while in pull mode
we will be hooking up the backing file ourselves when we create the
sync:none job, so a pre-existing backing file on the scratch disk gets in
the way; that argues for a blind backing:null in that case.
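For the archives, the backing:null variant amounts to attaching the scratch
image with an explicitly severed backing chain at the QMP level, roughly
(node names and path invented):

    {"execute": "blockdev-add",
     "arguments": {"driver": "qcow2", "node-name": "backup-vda-scratch",
                   "file": {"driver": "file",
                            "filename": "/tmp/vda.scratch.qcow2"},
                   "backing": null}}

whereas omitting "backing" means qemu honors whatever backing file the
image header names.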
+/**
+ * qemuBackupBeginPullExportDisks:
+ * @vm: domain object
+ * @disks: backup disk data list
+ * @ndisks: number of valid disks in @disks
+ *
+ * Exports all disks from @disks via the NBD server when doing a pull backup. This
+ * function must be called while in the monitor context.
+ */
+static int
+qemuBackupBeginPullExportDisks(virDomainObjPtr vm,
+ struct qemuBackupDiskData *disks,
+ size_t ndisks)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ size_t i;
+
+ for (i = 0; i < ndisks; i++) {
+ struct qemuBackupDiskData *dd = disks + i;
+
+ if (qemuMonitorNBDServerAdd(priv->mon,
+ dd->store->nodeformat,
+ dd->domdisk->dst,
+ false,
+ dd->incrementalBitmap) < 0)
+ return -1;
+ }
When there's more than one disk, this process is non-atomic (a lucky
observer on the NBD port could see one but not all of the disks exported
yet). Or even with just one disk, a lucky observer can see the NBD port
created but no disk exported yet. At any rate, until the libvirt backup
API returns, the user shouldn't be trying to probe the NBD port anyway,
so this doesn't bother me.
+
+ return 0;
+}
+
+
+/**
+ * qemuBackupBeginCollectIncrementalCheckpoints:
+ * @vm: domain object
+ * @incrFrom: name of checkpoint representing starting point of incremental backup
+ *
+ * Returns a NULL-terminated list of pointers to checkpoints in reverse
+ * chronological order, starting from the 'current' checkpoint and walking
+ * back until reaching @incrFrom.
+ */
+static virDomainMomentObjPtr *
+qemuBackupBeginCollectIncrementalCheckpoints(virDomainObjPtr vm,
+ const char *incrFrom)
+{
+ virDomainMomentObjPtr n = virDomainCheckpointGetCurrent(vm->checkpoints);
+ g_autofree virDomainMomentObjPtr *incr = NULL;
+ size_t nincr = 0;
+
+ while (n) {
+ if (VIR_APPEND_ELEMENT_COPY(incr, nincr, n) < 0)
Are there better glib functions for doing this string management?
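Maybe GPtrArray; an untested sketch of the same collection logic:

    GPtrArray *tmp = g_ptr_array_new();

    while (n) {
        g_ptr_array_add(tmp, n);        /* collect each checkpoint */
        ...
    }
    g_ptr_array_add(tmp, NULL);         /* NULL terminator */
    return (virDomainMomentObjPtr *)g_ptr_array_free(tmp, FALSE);

where g_ptr_array_free(tmp, FALSE) frees the wrapper but hands back the
element array.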
+ return NULL;
+
+ if (STREQ(n->def->name, incrFrom)) {
+ virDomainMomentObjPtr terminator = NULL;
+ if (VIR_APPEND_ELEMENT_COPY(incr, nincr, terminator) < 0)
+ return NULL;
+
+ return g_steal_pointer(&incr);
+ }
+
+ if (!n->def->parent_name)
+ break;
+
+ n = virDomainCheckpointFindByName(vm->checkpoints, n->def->parent_name);
+ }
+
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ _("could not locate checkpoint '%s' for incremental
backup"),
+ incrFrom);
+ return NULL;
+}
+
+
+/**
+ * qemuBackupJobCancelBlockjobs:
+ * @vm: domain object
+ * @backup: backup definition
+ * @terminatebackup: flag whether to terminate and unregister the backup
+ *
+ * Sends a cancel request to all active blockjobs which are part of @backup
+ * of @vm. If @terminatebackup is true, qemuBackupJobTerminate is also called
+ * if there are no outstanding active blockjobs.
+ */
+void
+qemuBackupJobCancelBlockjobs(virDomainObjPtr vm,
+ virDomainBackupDefPtr backup,
+ bool terminatebackup)
+{
+
+int
+qemuBackupBegin(virDomainObjPtr vm,
+ const char *backupXML,
+ const char *checkpointXML,
+ unsigned int flags)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
+ g_autoptr(virDomainBackupDef) def = NULL;
+ g_autoptr(virCaps) caps = NULL;
+ g_autofree char *suffix = NULL;
+ struct timeval tv;
+ bool pull = false;
+ virDomainMomentObjPtr chk = NULL;
+ g_autoptr(virDomainCheckpointDef) chkdef = NULL;
+ g_autofree virDomainMomentObjPtr *incremental = NULL;
+ g_autoptr(virJSONValue) actions = NULL;
+ struct qemuBackupDiskData *dd = NULL;
+ ssize_t ndd = 0;
+ g_autoptr(virHashTable) blockNamedNodeData = NULL;
+ bool job_started = false;
+ bool nbd_running = false;
+ bool reuse = (flags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL);
+ int rc = 0;
+ int ret = -1;
+
+ virCheckFlags(VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL, -1);
Adding support for a QUIESCE flag would also be nice (using the guest
agent to auto-freeze filesystems prior to kicking off the
checkpoint/backup). But that can be a followup.
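Something along these lines is what I have in mind, where the flag and the
freeze/thaw helpers are hypothetical (names borrowed from the snapshot
code, signatures approximate):

    /* hypothetical QUIESCE wiring; flag and helpers do not exist yet */
    if (flags & VIR_DOMAIN_BACKUP_BEGIN_QUIESCE) {
        if (qemuDomainSnapshotFSFreeze(priv->driver, vm, NULL, 0) < 0)
            goto endjob;
        frozen = true;
    }
    ...
    /* thaw once the 'transaction' command has run, or on the error path */
    if (frozen)
        ignore_value(qemuDomainSnapshotFSThaw(priv->driver, vm, false));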
+
+ if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_INCREMENTAL_BACKUP)) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("incremental backup is not supported yet"));
+ return -1;
+ }
Is it safe to be reporting errors like this prior to ACL checks? Can it
be used as an information leak?
/me reads further
Actually, isn't this missing an ACL check?
/me reads even further
Aha - this is a helper function, and the ACL check is performed in a
different file, before this point. Okay, not a problem after all.
+
+ if (!(caps = virQEMUDriverGetCapabilities(priv->driver, false)))
+ return -1;
+
+ if (!(def = virDomainBackupDefParseString(backupXML, priv->driver->xmlopt, 0)))
+ return -1;
+
+ if (checkpointXML) {
+ if (!(chkdef = virDomainCheckpointDefParseString(checkpointXML, caps,
+ priv->driver->xmlopt,
+ priv->qemuCaps, 0)))
+ return -1;
+
+ suffix = g_strdup(chkdef->parent.name);
+ } else {
+ gettimeofday(&tv, NULL);
+ suffix = g_strdup_printf("%lld", (long long)tv.tv_sec);
+ }
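Minor: now that we're on glib, g_get_real_time() could replace the raw
gettimeofday() and the struct timeval local, e.g.:

    /* g_get_real_time() returns wall-clock microseconds since the epoch */
    suffix = g_strdup_printf("%lld",
                             (long long)(g_get_real_time() / G_USEC_PER_SEC));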
+
+ if (def->type == VIR_DOMAIN_BACKUP_TYPE_PULL)
+ pull = true;
+
+ /* we'll treat this kind of backup job as an asyncjob as it uses some of the
+ * infrastructure for async jobs. We'll allow standard modify-type jobs
+ * as the interlocking of conflicting operations is handled on the block
+ * job level */
+ if (qemuDomainObjBeginAsyncJob(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP,
+ VIR_DOMAIN_JOB_OPERATION_BACKUP, flags) < 0)
+ return -1;
+
+ qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
+ JOB_MASK(QEMU_JOB_SUSPEND) |
+ JOB_MASK(QEMU_JOB_MODIFY)));
+ priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
+
+
+ if (!virDomainObjIsActive(vm)) {
Is the double blank line intentional?
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("cannot perform disk backup for inactive domain"));
+ goto endjob;
+ }
+
+ if (priv->backup) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("another backup job is already running"));
+ goto endjob;
+ }
+
+ if (qemuBackupPrepare(def) < 0)
+ goto endjob;
+
+ if (virDomainBackupAlignDisks(def, vm->def, suffix) < 0)
+ goto endjob;
+
+ if (def->incremental &&
+ !(incremental = qemuBackupBeginCollectIncrementalCheckpoints(vm, def->incremental)))
+ goto endjob;
+
+ if (!(actions = virJSONValueNewArray()))
+ goto endjob;
+
+ if (chkdef) {
+ if (qemuCheckpointCreateCommon(priv->driver, vm, caps, &chkdef,
+ &actions, &chk) < 0)
+ goto endjob;
+ }
+
+ if ((ndd = qemuBackupDiskPrepareData(vm, def, incremental, actions, cfg, &dd,
+ reuse)) <= 0) {
+ if (ndd == 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("no disks selected for backup"));
+ }
+
+ goto endjob;
+ }
Lots of prep work before kicking off the backup :) But it looks good so
far.
+
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ goto endjob;
+ blockNamedNodeData = qemuMonitorBlockGetNamedNodeData(priv->mon);
+ if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || !blockNamedNodeData)
+ goto endjob;
Do you still need to ask the monitor for named node data, or should you
already have access to all that information since backups require
-blockdev support?
+
+ if (qemuBackupDiskPrepareStorage(vm, dd, ndd, blockNamedNodeData, reuse) < 0)
+ goto endjob;
+
+ priv->backup = g_steal_pointer(&def);
+
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ goto endjob;
+
+ /* TODO: TLS is a must-have for the modern age */
+ if (pull) {
+ if ((rc = qemuMonitorNBDServerStart(priv->mon, priv->backup->server, NULL)) == 0)
+ nbd_running = true;
+ }
Yep, adding TLS in a followup patch will be necessary. But one step at
a time.
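For later reference, that followup at the QMP level is roughly the
following, with the tls-creds object id invented (the object itself would
come from an -object tls-creds-x509,... definition):

    {"execute": "nbd-server-start",
     "arguments": {"addr": {"type": "inet",
                            "data": {"host": "::", "port": "10809"}},
                   "tls-creds": "tls-creds-backup"}}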
+
+ if (rc == 0)
+ rc = qemuMonitorTransaction(priv->mon, &actions);
+
+ if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
+ goto endjob;
+
+ job_started = true;
+ qemuBackupDiskStarted(vm, dd, ndd);
+
+ if (chk &&
+ qemuCheckpointCreateFinalize(priv->driver, vm, cfg, chk, true) < 0)
+ goto endjob;
+
+ if (pull) {
+ if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0)
+ goto endjob;
+ /* note that if the export fails we've already created the checkpoint
+ * and we will not delete it */
Why not?
+ rc = qemuBackupBeginPullExportDisks(vm, dd, ndd);
+ if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
+ goto endjob;
+
+ if (rc < 0) {
+ qemuBackupJobCancelBlockjobs(vm, priv->backup, false);
+ goto endjob;
+ }
+ }
+
+ ret = 0;
+
+ endjob:
+ qemuBackupDiskDataCleanup(vm, dd, ndd);
+ if (!job_started && nbd_running &&
+ qemuDomainObjEnterMonitorAsync(priv->driver, vm, QEMU_ASYNC_JOB_BACKUP) < 0) {
+ ignore_value(qemuMonitorNBDServerStop(priv->mon));
+ ignore_value(qemuDomainObjExitMonitor(priv->driver, vm));
+ }
+
+ if (ret < 0 && !job_started)
+ def = g_steal_pointer(&priv->backup);
+
+ if (ret == 0)
+ qemuDomainObjReleaseAsyncJob(vm);
+ else
+ qemuDomainObjEndAsyncJob(priv->driver, vm);
+
+ return ret;
+}
+
+
+char *
+qemuBackupGetXMLDesc(virDomainObjPtr vm,
+ unsigned int flags)
+{
+ g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
+ virDomainBackupDefPtr backup;
+
+ virCheckFlags(0, NULL);
+
Missing ACL check?
/me repeats exercise of reading further
No, this is a helper function.
+ if (!(backup = qemuDomainGetBackup(vm)))
+ return NULL;
+
+ if (virDomainBackupDefFormat(&buf, backup, false) < 0)
+ return NULL;
+
+ return virBufferContentAndReset(&buf);
+}
+
+
+void
+qemuBackupNotifyBlockjobEnd(virDomainObjPtr vm,
+ virDomainDiskDefPtr disk,
+ qemuBlockjobState state,
+ unsigned long long cur,
+ unsigned long long end)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ bool has_running = false;
+ bool has_cancelling = false;
+ bool has_cancelled = false;
+ bool has_failed = false;
+ qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
+ virDomainBackupDefPtr backup = priv->backup;
+ size_t i;
+
+ VIR_DEBUG("vm: '%s', disk:'%s', state:'%d'",
+ vm->def->name, disk->dst, state);
+
+ if (!backup)
+ return;
+
+ if (backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL) {
+ qemuDomainObjEnterMonitor(priv->driver, vm);
+ ignore_value(qemuMonitorNBDServerStop(priv->mon));
+ if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
+ return;
+
+ /* update the final statistics with the current job's data */
+ backup->pull_tmp_used += cur;
+ backup->pull_tmp_total += end;
+ } else {
+ backup->push_transferred += cur;
+ backup->push_total += end;
+ }
+
If I understand, this is no longer an API entry point, but now a helper
function called by the existing virDomainAbortJob API, so this one does
not need an ACL check (as that would already have been performed).
+ for (i = 0; i < backup->ndisks; i++) {
+ virDomainBackupDiskDefPtr backupdisk = backup->disks + i;
+
+ if (!backupdisk->store)
+ continue;
+
+ if (STREQ(disk->dst, backupdisk->name)) {
+ switch (state) {
+ case QEMU_BLOCKJOB_STATE_COMPLETED:
+ backupdisk->state = VIR_DOMAIN_BACKUP_DISK_STATE_COMPLETE;
+ break;
+
+ case QEMU_BLOCKJOB_STATE_CONCLUDED:
+ case QEMU_BLOCKJOB_STATE_FAILED:
+ backupdisk->state = VIR_DOMAIN_BACKUP_DISK_STATE_FAILED;
+ break;
+
+ case QEMU_BLOCKJOB_STATE_CANCELLED:
+ backupdisk->state = VIR_DOMAIN_BACKUP_DISK_STATE_CANCELLED;
+ break;
+
+ case QEMU_BLOCKJOB_STATE_READY:
+ case QEMU_BLOCKJOB_STATE_NEW:
+ case QEMU_BLOCKJOB_STATE_RUNNING:
+ case QEMU_BLOCKJOB_STATE_ABORTING:
+ case QEMU_BLOCKJOB_STATE_PIVOTING:
+ case QEMU_BLOCKJOB_STATE_LAST:
+ default:
+ break;
+ }
+ }
+
+ switch (backupdisk->state) {
+ case VIR_DOMAIN_BACKUP_DISK_STATE_COMPLETE:
+ break;
+
+ case VIR_DOMAIN_BACKUP_DISK_STATE_RUNNING:
+ has_running = true;
+ break;
+
+ case VIR_DOMAIN_BACKUP_DISK_STATE_CANCELLING:
+ has_cancelling = true;
+ break;
+
+ case VIR_DOMAIN_BACKUP_DISK_STATE_FAILED:
+ has_failed = true;
+ break;
+
+ case VIR_DOMAIN_BACKUP_DISK_STATE_CANCELLED:
+ has_cancelled = true;
+ break;
+
+ case VIR_DOMAIN_BACKUP_DISK_STATE_NONE:
+ case VIR_DOMAIN_BACKUP_DISK_STATE_LAST:
+ break;
+ }
+ }
+
+ if (has_running && (has_failed || has_cancelled)) {
+ /* cancel the rest of the jobs */
+ qemuBackupJobCancelBlockjobs(vm, backup, false);
+ } else if (!has_running && !has_cancelling) {
+ /* all sub-jobs have stopped */
+
+ if (has_failed)
+ jobstatus = QEMU_DOMAIN_JOB_STATUS_FAILED;
+ else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH)
+ jobstatus = QEMU_DOMAIN_JOB_STATUS_CANCELED;
+
+ qemuBackupJobTerminate(vm, jobstatus);
+ }
+
+ /* otherwise we must wait for the jobs to end */
+}
diff --git a/src/qemu/qemu_backup.h b/src/qemu/qemu_backup.h
+++ b/src/qemu/qemu_driver.c
@@ -52,6 +52,7 @@
#include "qemu_blockjob.h"
#include "qemu_security.h"
#include "qemu_checkpoint.h"
+#include "qemu_backup.h"
#include "virerror.h"
#include "virlog.h"
@@ -17212,6 +17213,50 @@ qemuDomainCheckpointDelete(virDomainCheckpointPtr checkpoint,
}
+static int
+qemuDomainBackupBegin(virDomainPtr domain,
+ const char *backupXML,
+ const char *checkpointXML,
+ unsigned int flags)
+{
+ virDomainObjPtr vm = NULL;
+ int ret = -1;
+
+ if (!(vm = qemuDomainObjFromDomain(domain)))
+ goto cleanup;
+
+ if (virDomainBackupBeginEnsureACL(domain->conn, vm->def) < 0)
+ goto cleanup;
+
+ ret = qemuBackupBegin(vm, backupXML, checkpointXML, flags);
And here's what I was missing in my initial read of one file when asking
about ACL checks above.
@@ -22953,6 +22998,8 @@ static virHypervisorDriver qemuHypervisorDriver = {
.domainCheckpointDelete = qemuDomainCheckpointDelete, /* 5.6.0 */
.domainGetGuestInfo = qemuDomainGetGuestInfo, /* 5.7.0 */
.domainAgentSetResponseTimeout = qemuDomainAgentSetResponseTimeout, /* 5.10.0 */
+ .domainBackupBegin = qemuDomainBackupBegin, /* 5.10.0 */
+ .domainBackupGetXMLDesc = qemuDomainBackupGetXMLDesc, /* 5.10.0 */
6.0.0; these new entry points will not make 5.10.0, which has already been
released.
There, I've reviewed the whole file. There are probably a few tweaks
needed based on what I've pointed out, but we're very close to having this
be ready for inclusion.
--
Eric Blake, Principal Software Engineer
Red Hat, Inc.           +1-919-301-3226
Virtualization:  qemu.org | libvirt.org