Since a snapshot is fully recoverable, it is useful as a means of
hibernating a guest: take a snapshot, then later revert to the
snapshot to wake the guest back up. This mode of usage is similar
to 'virsh save'/'virsh restore', except that virsh save places the
state in an external file while virsh snapshot keeps the VM state
internal to a qcow2 file.
In the usage pattern of snapshot/revert for hibernating a guest,
there is no need to keep the guest running between the two points
in time, especially since that would generate runtime state that
would just be discarded. Add a flag to make it possible to
stop the domain after the snapshot has completed.
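For illustration, the hibernate half of that pattern might look like
this from a management application (a minimal sketch against the
public API; the snapshot XML and error handling are placeholders,
not part of this patch):

  #include <stdio.h>
  #include <libvirt/libvirt.h>

  /* Take a system checkpoint, halting the domain once the snapshot
   * completes. */
  static int
  hibernate_guest(virDomainPtr dom)
  {
      const char *xml =
          "<domainsnapshot><name>hibernate</name></domainsnapshot>";
      virDomainSnapshotPtr snap;

      snap = virDomainSnapshotCreateXML(dom, xml,
                                        VIR_DOMAIN_SNAPSHOT_CREATE_HALT);
      if (!snap) {
          fprintf(stderr, "snapshot creation failed\n");
          return -1;
      }
      virDomainSnapshotFree(snap);
      return 0; /* the domain is now inactive */
  }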
* include/libvirt/libvirt.h.in (VIR_DOMAIN_SNAPSHOT_CREATE_HALT):
New flag.
* src/libvirt.c (virDomainSnapshotCreateXML): Document it.
* src/qemu/qemu_driver.c (qemuDomainSnapshotCreateXML)
(qemuDomainSnapshotCreateActive): Implement it.
---
Once again, testing this patch turned up several bug fixes needed
earlier in the series.
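For reference, the wake-up half of the pattern is just a revert,
using existing API (sketch only; assumes the snapshot created above,
named "hibernate"):

  #include <libvirt/libvirt.h>

  /* Boot the guest from its hibernation snapshot, resuming the saved
   * runtime state. */
  static int
  wake_guest(virDomainPtr dom)
  {
      virDomainSnapshotPtr snap;
      int ret = -1;

      snap = virDomainSnapshotLookupByName(dom, "hibernate", 0);
      if (!snap)
          return -1;
      if (virDomainRevertToSnapshot(snap, 0) == 0)
          ret = 0; /* guest picks up where it left off */
      virDomainSnapshotFree(snap);
      return ret;
  }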
 include/libvirt/libvirt.h.in |    5 +++++
 src/libvirt.c                |   15 ++++++++++++++-
 src/qemu/qemu_driver.c       |   33 +++++++++++++++++++++++++++++----
 3 files changed, 48 insertions(+), 5 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 49fe6b3..e07dc20 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -2559,6 +2559,11 @@ typedef struct _virDomainSnapshot virDomainSnapshot;
*/
typedef virDomainSnapshot *virDomainSnapshotPtr;

+typedef enum {
+ VIR_DOMAIN_SNAPSHOT_CREATE_HALT = (1 << 0), /* Stop running guest after
+ snapshot is complete */
+} virDomainSnapshotCreateFlags;
+
/* Take a snapshot of the current VM state */
virDomainSnapshotPtr virDomainSnapshotCreateXML(virDomainPtr domain,
const char *xmlDesc,
diff --git a/src/libvirt.c b/src/libvirt.c
index 2c84e7e..ffd27bc 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -15464,11 +15464,24 @@ error:
* virDomainSnapshotCreateXML:
* @domain: a domain object
* @xmlDesc: string containing an XML description of the domain
- * @flags: unused flag parameters; callers should pass 0
+ * @flags: bitwise-OR of virDomainSnapshotCreateFlags
*
* Creates a new snapshot of a domain based on the snapshot xml
* contained in xmlDesc.
*
+ * If @flags is 0, the domain can be active, in which case the
+ * snapshot will be a system checkpoint (both disk state and runtime
+ * VM state such as RAM contents), where reverting to the snapshot is
+ * the same as resuming from hibernation (TCP connections may have
+ * timed out, but everything else picks up where it left off); or
+ * the domain can be inactive, in which case the snapshot includes
+ * just the disk state prior to booting.
+ *
+ * If @flags includes VIR_DOMAIN_SNAPSHOT_CREATE_HALT, then the domain
+ * will be inactive after the snapshot completes, regardless of whether
+ * it was active before; otherwise, a running domain will still be
+ * running after the snapshot. This flag is invalid on transient domains.
+ *
* Returns an (opaque) virDomainSnapshotPtr on success, NULL on failure.
*/
virDomainSnapshotPtr
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e87c11b..4c2706f 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -8603,7 +8603,8 @@ static int
qemuDomainSnapshotCreateActive(virConnectPtr conn,
struct qemud_driver *driver,
virDomainObjPtr *vmptr,
- virDomainSnapshotObjPtr snap)
+ virDomainSnapshotObjPtr snap,
+ unsigned int flags)
{
virDomainObjPtr vm = *vmptr;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -8633,6 +8634,24 @@ qemuDomainSnapshotCreateActive(virConnectPtr conn,
qemuDomainObjEnterMonitorWithDriver(driver, vm);
ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitorWithDriver(driver, vm);
+ if (ret < 0)
+ goto cleanup;
+
+ if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT) {
+ virDomainEventPtr event;
+
+ event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
+ VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
+ qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT);
+ virDomainAuditStop(vm, "from-snapshot");
+ /* We already filtered the _HALT flag for persistent domains
+ * only, so this end job never drops the last reference. */
+ ignore_value(qemuDomainObjEndJob(driver, vm));
+ resume = false;
+ vm = NULL;
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ }

cleanup:
if (resume && virDomainObjIsActive(vm) &&
@@ -8644,7 +8663,7 @@ cleanup:
_("resuming after snapshot failed"));
}

- if (qemuDomainObjEndJob(driver, vm) == 0) {
+ if (vm && qemuDomainObjEndJob(driver, vm) == 0) {
/* Only possible if a transient vm quit while our locks were down,
* in which case we don't want to save snapshot metadata. */
*vmptr = NULL;
@@ -8666,7 +8685,7 @@ static virDomainSnapshotPtr qemuDomainSnapshotCreateXML(virDomainPtr domain,
char uuidstr[VIR_UUID_STRING_BUFLEN];
virDomainSnapshotDefPtr def = NULL;

- virCheckFlags(0, NULL);
+ virCheckFlags(VIR_DOMAIN_SNAPSHOT_CREATE_HALT, NULL);

qemuDriverLock(driver);
virUUIDFormat(domain->uuid, uuidstr);
@@ -8677,6 +8696,12 @@ static virDomainSnapshotPtr qemuDomainSnapshotCreateXML(virDomainPtr domain,
goto cleanup;
}

+ if (!vm->persistent && (flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("cannot halt after transient domain snapshot"));
+ goto cleanup;
+ }
+
/* in a perfect world, we would allow qemu to tell us this. The problem
* is that qemu only does this check device-by-device; so if you had a
* domain that booted from a large qcow2 device, but had a secondary raw
@@ -8724,7 +8749,7 @@ static virDomainSnapshotPtr qemuDomainSnapshotCreateXML(virDomainPtr domain,
goto cleanup;
} else {
if (qemuDomainSnapshotCreateActive(domain->conn, driver,
- &vm, snap) < 0)
+ &vm, snap, flags) < 0)
goto cleanup;
}

--
1.7.4.4