Implement the v3 migration protocol, which adds two extra
steps: 'begin', run on the source host before the migration
starts, and 'confirm', run on the source host once it has
completed. All other methods also gain both input and output
cookies to allow bi-directional data passing at every stage.
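
To make the cookie plumbing concrete: each step's output cookie is
fed back in as the next step's input cookie, with the source freeing
the previous input each time (this mirrors the cookiein/cookieout
swap in doPeer2PeerMigrate3 below). A minimal standalone sketch of
that hand-off pattern; phase() and the harness are hypothetical
stand-ins for the real driver entry points, not libvirt code:

    /* Illustrative only: shows how the output cookie of each of the
     * five v3 phases becomes the input cookie of the next one. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int phase(const char *name,
                     const char *cookiein, int cookieinlen,
                     char **cookieout, int *cookieoutlen)
    {
        printf("%s: in=%.*s\n", name, cookieinlen,
               cookiein ? cookiein : "");
        if (!(*cookieout = strdup(name)))   /* pretend this is baked XML */
            return -1;
        *cookieoutlen = strlen(*cookieout) + 1;
        return 0;
    }

    int main(void)
    {
        const char *phases[] = { "Begin", "Prepare", "Perform",
                                 "Finish", "Confirm" };
        char *cookiein = NULL, *cookieout = NULL;
        int cookieinlen = 0, cookieoutlen = 0;
        size_t i;

        for (i = 0; i < sizeof(phases)/sizeof(phases[0]); i++) {
            if (phase(phases[i], cookiein, cookieinlen,
                      &cookieout, &cookieoutlen) < 0)
                break;
            free(cookiein);          /* previous output becomes new input */
            cookiein = cookieout;
            cookieinlen = cookieoutlen;
            cookieout = NULL;
            cookieoutlen = 0;
        }
        free(cookiein);
        return 0;
    }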
The QEMU peer2peer migration method gains a second implementation
providing the v3 protocol. This finally allows migration cookies
to work with tunnelled migration, which is required for SPICE
seamless migration and the lock manager transfer.
* src/qemu/qemu_driver.c: Wire up migrate v3 APIs
* src/qemu/qemu_migration.c, src/qemu/qemu_migration.h: Add
begin & confirm methods, and peer2peer impl of v3
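
As a usage sketch (not part of the patch): once both libvirtds
advertise VIR_DRV_FEATURE_MIGRATION_V3, a management client can
exercise the new tunnelled peer2peer path through the existing
public API; the connection URI and domain name below are
placeholders:

    /* Illustrative client code using the public libvirt API only. */
    #include <stdio.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom;
        unsigned long flags = VIR_MIGRATE_LIVE |
                              VIR_MIGRATE_PEER2PEER |
                              VIR_MIGRATE_TUNNELLED;

        if (!conn)
            return 1;
        if (!(dom = virDomainLookupByName(conn, "demo"))) {
            virConnectClose(conn);
            return 1;
        }

        /* The source libvirtd opens the destination itself and, if the
         * peer supports it, drives the Begin/Prepare/Perform/Finish/
         * Confirm sequence with cookies at each step. */
        if (virDomainMigrateToURI(dom, "qemu+ssh://dst.example.org/system",
                                  flags, NULL, 0) < 0)
            fprintf(stderr, "migration failed\n");

        virDomainFree(dom);
        virConnectClose(conn);
        return 0;
    }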
---
src/qemu/qemu_driver.c | 318 ++++++++++++++++++++++++++++++++++++++-
src/qemu/qemu_migration.c | 364 +++++++++++++++++++++++++++++++++++++++++++--
src/qemu/qemu_migration.h | 17 ++-
3 files changed, 675 insertions(+), 24 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f382a24..10e8b91 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -870,6 +870,7 @@ qemudSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
{
switch (feature) {
case VIR_DRV_FEATURE_MIGRATION_V2:
+ case VIR_DRV_FEATURE_MIGRATION_V3:
case VIR_DRV_FEATURE_MIGRATION_P2P:
return 1;
default:
@@ -5417,7 +5418,9 @@ qemuDomainEventDeregisterAny(virConnectPtr conn,
}
-/* Migration support. */
+/*******************************************************************
+ * Migration Protocol Version 2
+ *******************************************************************/
/* Prepare is the first step, and it runs on the destination host.
*
@@ -5435,6 +5438,15 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
struct qemud_driver *driver = dconn->privateData;
int ret = -1;
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
+
if (!dom_xml) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("no domain XML passed"));
@@ -5554,7 +5566,7 @@ qemudDomainMigratePerform (virDomainPtr dom,
ret = qemuMigrationPerform(driver, dom->conn, vm,
uri, cookie, cookielen,
NULL, NULL, /* No output cookies in v2 */
- flags, dname, resource);
+ flags, dname, resource, true);
cleanup:
qemuDriverUnlock(driver);
@@ -5611,6 +5623,296 @@ cleanup:
}
+/*******************************************************************
+ * Migration Protocol Version 3
+ *******************************************************************/
+
+static char *
+qemuDomainMigrateBegin3(virDomainPtr domain,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname ATTRIBUTE_UNUSED,
+ unsigned long resource ATTRIBUTE_UNUSED)
+{
+ struct qemud_driver *driver = domain->conn->privateData;
+ virDomainObjPtr vm;
+ char *xml = NULL;
+
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, NULL);
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, domain->uuid);
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(domain->uuid, uuidstr);
+ qemuReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"), uuidstr);
+ goto cleanup;
+ }
+
+ xml = qemuMigrationBegin(driver, vm,
+ cookieout, cookieoutlen);
+
+cleanup:
+ qemuDriverUnlock(driver);
+ return xml;
+}
+
+static int
+qemuDomainMigratePrepare3(virConnectPtr dconn,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ const char *uri_in,
+ char **uri_out,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource ATTRIBUTE_UNUSED,
+ const char *dom_xml)
+{
+ struct qemud_driver *driver = dconn->privateData;
+ int ret = -1;
+
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
+
+ *uri_out = NULL;
+
+ qemuDriverLock(driver);
+ if (flags & VIR_MIGRATE_TUNNELLED) {
+ /* this is a logical error; we never should have gotten here with
+ * VIR_MIGRATE_TUNNELLED set
+ */
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("Tunnelled migration requested but invalid RPC method called"));
+ goto cleanup;
+ }
+
+ if (!dom_xml) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("no domain XML passed"));
+ goto cleanup;
+ }
+
+ ret = qemuMigrationPrepareDirect(driver, dconn,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ uri_in, uri_out,
+ dname, dom_xml);
+
+cleanup:
+ qemuDriverUnlock(driver);
+ return ret;
+}
+
+
+static int
+qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
+ virStreamPtr st,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource ATTRIBUTE_UNUSED,
+ const char *dom_xml)
+{
+ struct qemud_driver *driver = dconn->privateData;
+ int ret = -1;
+
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
+
+ if (!dom_xml) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("no domain XML passed"));
+ goto cleanup;
+ }
+ if (!(flags & VIR_MIGRATE_TUNNELLED)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("PrepareTunnel called but no TUNNELLED flag set"));
+ goto cleanup;
+ }
+ if (st == NULL) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("tunnelled migration requested but NULL stream passed"));
+ goto cleanup;
+ }
+
+ qemuDriverLock(driver);
+ ret = qemuMigrationPrepareTunnel(driver, dconn,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ st, dname, dom_xml);
+ qemuDriverUnlock(driver);
+
+cleanup:
+ return ret;
+}
+
+
+static int
+qemuDomainMigratePerform3(virDomainPtr dom,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ struct qemud_driver *driver = dom->conn->privateData;
+ virDomainObjPtr vm;
+ int ret = -1;
+
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(dom->uuid, uuidstr);
+ qemuReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"), uuidstr);
+ goto cleanup;
+ }
+
+ ret = qemuMigrationPerform(driver, dom->conn, vm,
+ uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource, false);
+
+cleanup:
+ qemuDriverUnlock(driver);
+ return ret;
+}
+
+
+static int
+qemuDomainMigrateFinish3(virConnectPtr dconn,
+ const char *dname,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ const char *uri ATTRIBUTE_UNUSED,
+ unsigned long flags,
+ int cancelled,
+ virDomainPtr *newdom)
+{
+ struct qemud_driver *driver = dconn->privateData;
+ virDomainObjPtr vm;
+ virErrorPtr orig_err;
+ int ret = -1;
+
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
+
+ /* Migration failed. Save the current error so nothing squashes it */
+ orig_err = virSaveLastError();
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByName(&driver->domains, dname);
+ if (!vm) {
+ qemuReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching name '%s'"), dname);
+ goto cleanup;
+ }
+
+ *newdom = qemuMigrationFinish(driver, dconn, vm,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, cancelled);
+
+ ret = 0;
+
+cleanup:
+ if (orig_err) {
+ virSetError(orig_err);
+ virFreeError(orig_err);
+ }
+ qemuDriverUnlock(driver);
+ return ret;
+}
+
+static int
+qemuDomainMigrateConfirm3(virDomainPtr domain,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned long flags,
+ int cancelled)
+{
+ struct qemud_driver *driver = domain->conn->privateData;
+ virDomainObjPtr vm;
+ int ret = -1;
+
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
+
+ /* Migration failed. Save the current error so nothing squashes it */
+
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, domain->uuid);
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(domain->uuid, uuidstr);
+ qemuReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"), uuidstr);
+ goto cleanup;
+ }
+
+ ret = qemuMigrationConfirm(driver, domain->conn, vm,
+ cookiein, cookieinlen,
+ flags, cancelled, false);
+
+cleanup:
+ qemuDriverUnlock(driver);
+ return ret;
+}
+
+
static int
qemudNodeDeviceGetPciInfo (virNodeDevicePtr dev,
unsigned *domain,
@@ -7037,12 +7339,12 @@ static virDriver qemuDriver = {
qemuDomainSnapshotDelete, /* domainSnapshotDelete */
qemuDomainMonitorCommand, /* qemuDomainMonitorCommand */
qemuDomainOpenConsole, /* domainOpenConsole */
- NULL, /* domainMigrateBegin3 */
- NULL, /* domainMigratePrepare3 */
- NULL, /* domainMigratePrepareTunnel3 */
- NULL, /* domainMigratePerform3 */
- NULL, /* domainMigrateFinish3 */
- NULL, /* domainMigrateConfirm3 */
+ qemuDomainMigrateBegin3, /* domainMigrateBegin3 */
+ qemuDomainMigratePrepare3, /* domainMigratePrepare3 */
+ qemuDomainMigratePrepareTunnel3, /* domainMigratePrepareTunnel3 */
+ qemuDomainMigratePerform3, /* domainMigratePerform3 */
+ qemuDomainMigrateFinish3, /* domainMigrateFinish3 */
+ qemuDomainMigrateConfirm3, /* domainMigrateConfirm3 */
};
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 884e26d..fc32a9e 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -773,6 +773,42 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver,
}
+char *qemuMigrationBegin(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ char **cookieout,
+ int *cookieoutlen)
+{
+ char *rv = NULL;
+ qemuMigrationCookiePtr mig = NULL;
+
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is not running"));
+ goto cleanup;
+ }
+
+ if (!qemuMigrationIsAllowed(vm->def))
+ goto cleanup;
+
+ if (!(mig = qemuMigrationEatCookie(vm, NULL, 0, 0)))
+ goto cleanup;
+
+ if (qemuMigrationBakeCookie(mig, driver, vm,
+ cookieout, cookieoutlen,
+ 0) < 0)
+ goto cleanup;
+
+ rv = qemuDomainFormatXML(driver, vm,
+ VIR_DOMAIN_XML_SECURE |
+ VIR_DOMAIN_XML_UPDATE_CPU);
+
+cleanup:
+ virDomainObjUnlock(vm);
+ qemuMigrationCookieFree(mig);
+ return rv;
+}
+
+
/* Prepare is the first step, and it runs on the destination host.
*
* This version starts an empty VM listening on a localhost TCP port, and
@@ -1279,6 +1315,10 @@ static int doTunnelMigrate(struct qemud_driver *driver,
virDomainObjPtr vm,
virStreamPtr st,
virBitmapPtr qemuCaps,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
unsigned long flags,
unsigned long resource ATTRIBUTE_UNUSED)
{
@@ -1292,6 +1332,7 @@ static int doTunnelMigrate(struct qemud_driver *driver,
char *unixfile = NULL;
unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
int ret = -1;
+ qemuMigrationCookiePtr mig = NULL;
if (!qemuCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
!qemuCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
@@ -1352,6 +1393,13 @@ static int doTunnelMigrate(struct qemud_driver *driver,
goto cleanup;
}
+ if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
+ QEMU_MIGRATION_COOKIE_GRAPHICS)))
+ goto cleanup;
+
+ if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
+ VIR_WARN0("unable to provide data for graphics client relocation");
+
/* 3. start migration on source */
qemuDomainObjEnterMonitorWithDriver(driver, vm);
@@ -1417,6 +1465,10 @@ static int doTunnelMigrate(struct qemud_driver *driver,
ret = doTunnelSendAll(st, client_sock);
+ if (ret == 0 &&
+ qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
+ VIR_WARN0("Unable to encode migration cookie");
+
cancel:
if (ret != 0 && virDomainObjIsActive(vm)) {
qemuDomainObjEnterMonitorWithDriver(driver, vm);
@@ -1425,6 +1477,7 @@ cancel:
}
cleanup:
+ qemuMigrationCookieFree(mig);
VIR_FORCE_CLOSE(client_sock);
VIR_FORCE_CLOSE(qemu_sock);
if (unixfile) {
@@ -1528,7 +1581,9 @@ static int doPeer2PeerMigrate2(struct qemud_driver *driver,
*/
VIR_DEBUG("Perform %p", sconn);
if (flags & VIR_MIGRATE_TUNNELLED)
- ret = doTunnelMigrate(driver, vm, st, qemuCaps, flags, resource);
+ ret = doTunnelMigrate(driver, vm, st, qemuCaps,
+ NULL, 0, NULL, NULL,
+ flags, resource);
else
ret = doNativeMigrate(driver, vm, uri_out,
cookie, cookielen,
@@ -1580,6 +1635,191 @@ cleanup:
}
+/* This is essentially a re-impl of virDomainMigrateVersion3
+ * from libvirt.c, but running in source libvirtd context,
+ * instead of client app context & also adding in tunnel
+ * handling */
+static int doPeer2PeerMigrate3(struct qemud_driver *driver,
+ virConnectPtr sconn,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ virDomainPtr ddomain = NULL;
+ char *uri_out = NULL;
+ char *cookiein = NULL;
+ char *cookieout = NULL;
+ char *dom_xml = NULL;
+ int cookieinlen = 0;
+ int cookieoutlen = 0;
+ int ret = -1;
+ virErrorPtr orig_err = NULL;
+ int cancelled;
+ virBitmapPtr qemuCaps = NULL;
+ virStreamPtr st = NULL;
+
+ /* check that this qemu version supports the unix migration */
+ if (qemuCapsExtractVersionInfo(vm->def->emulator, vm->def->os.arch,
+ NULL, &qemuCaps) < 0) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Cannot extract Qemu version from '%s'"),
+ vm->def->emulator);
+ return -1;
+ }
+
+ VIR_DEBUG("Begin3 %p", sconn);
+ dom_xml = qemuMigrationBegin(driver, vm,
+ &cookieout, &cookieoutlen);
+ if (!dom_xml)
+ goto cleanup;
+
+ if (vm->state == VIR_DOMAIN_PAUSED)
+ flags |= VIR_MIGRATE_PAUSED;
+
+ VIR_DEBUG("Prepare3 %p", dconn);
+ cookiein = cookieout;
+ cookieinlen = cookieoutlen;
+ cookieout = NULL;
+ cookieoutlen = 0;
+ if (flags & VIR_MIGRATE_TUNNELLED) {
+ /*
+ * Tunnelled Migrate Version 2 does not support cookies
+ * due to missing parameters in the prepareTunnel() API.
+ */
+
+ if (!(st = virStreamNew(dconn, 0)))
+ goto cleanup;
+
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ ret = dconn->driver->domainMigratePrepareTunnel3
+ (dconn, st, cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, dname, resource, dom_xml);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+ } else {
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ ret = dconn->driver->domainMigratePrepare3
+ (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
+ NULL, &uri_out, flags, dname, resource, dom_xml);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+ }
+ VIR_FREE(dom_xml);
+ if (ret == -1)
+ goto cleanup;
+
+ if (!(flags & VIR_MIGRATE_TUNNELLED) &&
+ (uri_out == NULL)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("domainMigratePrepare3 did not set uri"));
+ cancelled = 1;
+ goto finish;
+ }
+
+ /* Perform the migration. The driver isn't supposed to return
+ * until the migration is complete. The src VM should remain
+ * running, but in paused state until the destination can
+ * confirm migration completion.
+ */
+ VIR_DEBUG("Perform3 %p uri=%s", sconn, uri_out);
+ VIR_FREE(cookiein);
+ cookiein = cookieout;
+ cookieinlen = cookieoutlen;
+ cookieout = NULL;
+ cookieoutlen = 0;
+ if (flags & VIR_MIGRATE_TUNNELLED)
+ ret = doTunnelMigrate(driver, vm, st, qemuCaps,
+ cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, resource);
+ else
+ ret = doNativeMigrate(driver, vm, uri_out,
+ cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, dname, resource);
+
+ /* Perform failed. Make sure Finish doesn't overwrite the error */
+ if (ret < 0)
+ orig_err = virSaveLastError();
+
+ /* If Perform returns < 0, then we need to cancel the VM
+ * startup on the destination
+ */
+ cancelled = ret < 0 ? 1 : 0;
+
+finish:
+ /*
+ * The status code from the source is passed to the destination.
+ * The dest can cleanup if the source indicated it failed to
+ * send all migration data. Returns NULL for ddomain if
+ * the dest was unable to complete migration.
+ */
+ VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
+ VIR_FREE(cookiein);
+ cookiein = cookieout;
+ cookieinlen = cookieoutlen;
+ cookieout = NULL;
+ cookieoutlen = 0;
+ dname = dname ? dname : vm->def->name;
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ ret = dconn->driver->domainMigrateFinish3
+ (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
+ uri_out ? uri_out : uri, flags, cancelled, &ddomain);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+
+ /* If ret is 0 then 'ddomain' indicates whether the VM is
+ * running on the dest. If not running, we can restart
+ * the source. If ret is -1, we can't be sure what happened
+ * to the VM on the dest, thus the only safe option is to
+ * kill the VM on the source, even though that may leave
+ * no VM at all on either host.
+ */
+ cancelled = ret == 0 && ddomain == NULL ? 1 : 0;
+
+ /*
+ * If cancelled, then src VM will be restarted, else
+ * it will be killed
+ */
+ VIR_DEBUG("Confirm3 %p ret=%d vm=%p", sconn, ret, vm);
+ VIR_FREE(cookiein);
+ cookiein = cookieout;
+ cookieinlen = cookieoutlen;
+ cookieout = NULL;
+ cookieoutlen = 0;
+ ret = qemuMigrationConfirm(driver, sconn, vm,
+ cookiein, cookieinlen,
+ flags, cancelled, true);
+ /* If Confirm3 returns -1, there's nothing more we can
+ * do, but fortunately worst case is that there is a
+ * domain left in 'paused' state on source.
+ */
+
+ cleanup:
+ if (ddomain) {
+ virUnrefDomain(ddomain);
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+ qemuCapsFree(qemuCaps);
+
+ if (st)
+ virUnrefStream(st);
+
+ if (orig_err) {
+ virSetError(orig_err);
+ virFreeError(orig_err);
+ }
+ VIR_FREE(uri_out);
+ VIR_FREE(cookiein);
+ VIR_FREE(cookieout);
+
+ return ret;
+}
+
+
static int doPeer2PeerMigrate(struct qemud_driver *driver,
virConnectPtr sconn,
virDomainObjPtr vm,
@@ -1591,6 +1831,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
int ret = -1;
virConnectPtr dconn = NULL;
bool p2p;
+ bool v3;
/* the order of operations is important here; we make sure the
* destination side is completely setup before we touch the source
@@ -1608,7 +1849,10 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
qemuDomainObjEnterRemoteWithDriver(driver, vm);
p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
VIR_DRV_FEATURE_MIGRATION_P2P);
+ v3 = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+ VIR_DRV_FEATURE_MIGRATION_V3);
qemuDomainObjExitRemoteWithDriver(driver, vm);
+
if (!p2p) {
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("Destination libvirt does not support peer-to-peer
migration protocol"));
@@ -1622,8 +1866,12 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
goto cleanup;
}
- ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
- uri, flags, dname, resource);
+ if (v3)
+ ret = doPeer2PeerMigrate3(driver, sconn, dconn, vm,
+ uri, flags, dname, resource);
+ else
+ ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
+ uri, flags, dname, resource);
cleanup:
/* don't call virConnectClose(), because that resets any pending errors */
@@ -1645,7 +1893,8 @@ int qemuMigrationPerform(struct qemud_driver *driver,
int *cookieoutlen,
unsigned long flags,
const char *dname,
- unsigned long resource)
+ unsigned long resource,
+ bool killOnFinish)
{
virDomainEventPtr event = NULL;
int ret = -1;
@@ -1689,18 +1938,20 @@ int qemuMigrationPerform(struct qemud_driver *driver,
}
/* Clean up the source domain. */
- qemuProcessStop(driver, vm, 1);
- qemuAuditDomainStop(vm, "migrated");
- resume = 0;
+ if (killOnFinish) {
+ qemuProcessStop(driver, vm, 1);
+ qemuAuditDomainStop(vm, "migrated");
+ resume = 0;
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_STOPPED,
- VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
- if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) {
- virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
- if (qemuDomainObjEndJob(vm) > 0)
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STOPPED,
+ VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
+ if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) {
+ virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
+ if (qemuDomainObjEndJob(vm) > 0)
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
}
ret = 0;
@@ -1906,6 +2157,89 @@ cleanup:
return dom;
}
+
+int qemuMigrationConfirm(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int retcode,
+ bool skipJob)
+{
+ qemuMigrationCookiePtr mig;
+ virDomainEventPtr event = NULL;
+ int rv = -1;
+
+ if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+ return -1;
+
+ if (!skipJob &&
+ qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ goto cleanup;
+
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto endjob;
+ }
+
+ /* Did the migration go as planned? If yes, kill off the
+ * domain object, but if no, resume CPUs
+ */
+ if (retcode == 0) {
+ qemuProcessStop(driver, vm, 1);
+ qemuAuditDomainStop(vm, "migrated");
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STOPPED,
+ VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
+ if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) {
+ virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
+ if (qemuDomainObjEndJob(vm) > 0)
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+ } else {
+
+ /* run 'cont' on the destination, which allows migration on qemu
+ * >= 0.10.6 to work properly. This isn't strictly necessary on
+ * older qemu's, but it also doesn't hurt anything there
+ */
+ if (qemuProcessStartCPUs(driver, vm, conn) < 0) {
+ if (virGetLastError() == NULL)
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("resume operation failed"));
+ goto endjob;
+ }
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_RESUMED,
+ VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+ if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+ VIR_WARN("Failed to save status on vm %s", vm->def->name);
+ goto endjob;
+ }
+ }
+
+ qemuMigrationCookieFree(mig);
+ rv = 0;
+
+endjob:
+ if (vm &&
+ !skipJob &&
+ qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+
+cleanup:
+ if (vm)
+ virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ return rv;
+}
+
+
/* Helper function called while driver lock is held and vm is active. */
int
qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 11571e7..42d8a42 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -32,6 +32,11 @@ int qemuMigrationSetOffline(struct qemud_driver *driver,
int qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm);
+char *qemuMigrationBegin(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ char **cookieout,
+ int *cookieoutlen);
+
int qemuMigrationPrepareTunnel(struct qemud_driver *driver,
virConnectPtr dconn,
const char *cookiein,
@@ -63,7 +68,8 @@ int qemuMigrationPerform(struct qemud_driver *driver,
int *cookieoutlen,
unsigned long flags,
const char *dname,
- unsigned long resource);
+ unsigned long resource,
+ bool killOnFinish);
virDomainPtr qemuMigrationFinish(struct qemud_driver *driver,
virConnectPtr dconn,
@@ -75,6 +81,15 @@ virDomainPtr qemuMigrationFinish(struct qemud_driver *driver,
unsigned long flags,
int retcode);
+int qemuMigrationConfirm(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int retcode,
+ bool skipJob);
+
int qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
virBitmapPtr qemuCaps,
--
1.7.4.4