On Mon, Nov 05, 2012 at 12:17:50 +0800, liguang wrote:
the original migration code was not aware of the offline case, so try
to support offline migration quietly (without disturbing the existing
migration paths) by passing the VIR_MIGRATE_OFFLINE flag to the
migration APIs only when the domain is really inactive, so that the
migration process is not confused by an offline domain and does not
exit unexpectedly.
these changes do not take care of the disk images the domain requires,
since those can be transferred by other APIs as suggested; therefore
VIR_MIGRATE_OFFLINE must not be combined with VIR_MIGRATE_NON_SHARED_*.
if you want a persistent migration, you should run
"virsh migrate --persistent" yourself.
...
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 978af57..6c2bf98 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9796,7 +9796,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
asyncJob = QEMU_ASYNC_JOB_NONE;
}
- if (!virDomainObjIsActive(vm)) {
+ if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
goto endjob;
@@ -9805,9 +9805,9 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
/* Check if there is any ejected media.
* We don't want to require them on the destination.
*/
-
- if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
- goto endjob;
+ if (virDomainObjIsActive(vm))
+ if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
+ goto endjob;
if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
cookieout, cookieoutlen,
What if you want to offline migrate a domain that is currently running? I
think it's better to first move qemuDomainCheckEjectableMedia down to
qemuMigrationBegin in a separate patch and then just use the code from v12
(without using the offline label).
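Roughly what I have in mind (an untested sketch; it assumes the media
check, together with its asyncJob handling, has already been moved into
qemuMigrationBegin by the preparatory patch):

    /* qemuDomainMigrateBegin3: allow an inactive domain only when the
     * offline flag is set */
    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        goto endjob;
    }

    /* qemuMigrationBegin: check ejectable media only when the domain
     * is running; a running domain may still be migrated offline */
    if (virDomainObjIsActive(vm) &&
        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
        goto cleanup;

This way offline migration of a running domain keeps working.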
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 5f8a9c5..66fbc02 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -72,6 +72,7 @@ enum qemuMigrationCookieFlags {
QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
QEMU_MIGRATION_COOKIE_FLAG_NETWORK,
+ QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST
};
@@ -79,13 +80,14 @@ enum qemuMigrationCookieFlags {
VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
QEMU_MIGRATION_COOKIE_FLAG_LAST,
- "graphics", "lockstate", "persistent",
"network");
+ "graphics", "lockstate", "persistent",
"network", "offline");
enum qemuMigrationCookieFeatures {
QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
QEMU_MIGRATION_COOKIE_NETWORK = (1 << QEMU_MIGRATION_COOKIE_FLAG_NETWORK),
+ QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
};
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
@@ -594,6 +596,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
if ((mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) && mig->network)
qemuMigrationCookieNetworkXMLFormat(buf, mig->network);
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE)
+ virBufferAsprintf(buf, " <offline/>\n");
+
virBufferAddLit(buf, "</qemu-migration>\n");
return 0;
}
@@ -874,6 +879,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
(!(mig->network = qemuMigrationCookieNetworkXMLParse(ctxt))))
goto error;
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) {
+ if (virXPathBoolean("count(./offline) > 0", ctxt))
+ mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
+ }
+
return 0;
error:
@@ -938,6 +948,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
return -1;
}
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+ mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
+ }
+
if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
return -1;
Oh, I'm sorry for not noticing this earlier, but why exactly do we need this
<offline/> element in the migration cookie? It doesn't look like we need to
store any additional data for offline migration. I think just passing the
VIR_MIGRATE_OFFLINE flag will be better. Not to mention that if an older
libvirt gets a cookie with an <offline/> element, it will just ignore it,
while passing a flag (which the code should already be doing anyway) will
make it fail with an unsupported-flag error.
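In other words, something like this on the destination should be enough (a
sketch; it assumes VIR_MIGRATE_OFFLINE is plumbed down through the flags
argument of qemuMigrationPrepareAny rather than via the cookie):

    /* qemuMigrationPrepareAny: for an offline migration there is
     * nothing to do on the destination beyond defining the domain,
     * so skip starting the incoming QEMU process */
    if (flags & VIR_MIGRATE_OFFLINE) {
        ret = 0;
        goto done;
    }

with no <offline/> element and no extra qemuMigrationEatCookie flag needed.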
@@ -1443,6 +1457,24 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ if (flags & (VIR_MIGRATE_NON_SHARED_DISK|
+ VIR_MIGRATE_NON_SHARED_INC)) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("offline migration cannot handle
non-shared storage"));
+ goto cleanup;
+ }
+ if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+ virReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("offline migration must be specified
with the persistent flag set"));
+ goto cleanup;
+ }
+ if (qemuMigrationBakeCookie(mig, driver, vm,
+ cookieout, cookieoutlen,
+ QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
+ goto cleanup;
+ }
+
if (xmlin) {
if (!(def = virDomainDefParseString(driver->caps, xmlin,
QEMU_EXPECTED_VIRT_TYPES,
Good, just wrap the two long lines and create a separate patch (which this one
will depend on) to move qemuDomainCheckEjectableMedia here as well.
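For example (just a formatting suggestion for the two error messages):

    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                   _("offline migration cannot handle "
                     "non-shared storage"));
    ...
    virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                   _("offline migration must be specified "
                     "with the persistent flag set"));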
@@ -1607,6 +1639,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
goto endjob;
}
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+ QEMU_MIGRATION_COOKIE_OFFLINE)))
+ return ret;
As I wrote in my last review, this needs to call goto endjob rather than
returning directly.
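That is:

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_OFFLINE)))
        goto endjob;

otherwise the job started earlier in this function is never ended.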
+
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+ ret = 0;
+ goto done;
+ }
+
/* Start the QEMU daemon, with the same command-line arguments plus
* -incoming $migrateFrom
*/
...
@@ -2150,6 +2192,9 @@ qemuMigrationRun(struct qemud_driver *driver,
return -1;
}
+ if (flags & VIR_MIGRATE_OFFLINE)
+ return 0;
+
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_GRAPHICS)))
goto cleanup;
I still think we should not even get into qemuMigrationRun when doing offline
migration.
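The flag check belongs in the caller instead; a sketch (the exact call site
depends on how the perform phase ends up being structured):

    /* in the perform phase, before ever calling qemuMigrationRun */
    if (flags & VIR_MIGRATE_OFFLINE) {
        /* no CPU/device state to transfer; the domain definition was
         * already sent in the Begin/Prepare phases */
        ret = 0;
        goto cleanup;
    }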
@@ -2665,7 +2710,12 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
uri, &uri_out, flags, dname, resource, dom_xml);
qemuDomainObjExitRemoteWithDriver(driver, vm);
}
+
VIR_FREE(dom_xml);
+
+ if (flags & VIR_MIGRATE_OFFLINE)
+ goto cleanup;
+
if (ret == -1)
goto cleanup;
Quoting from my previous review:
This will skip not only the Perform but also the Finish phase in
peer-to-peer migration. You want to jump to the finish label and do
that *after* the check for ret == -1.
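I.e., something like this in doPeer2PeerMigrate3:

    VIR_FREE(dom_xml);

    if (ret == -1)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE)
        goto finish;

so that an error from Prepare is still reported and the Finish phase still
runs on the destination.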
@@ -2771,7 +2821,7 @@ finish:
vm->def->name);
cleanup:
- if (ddomain) {
+ if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) {
virObjectUnref(ddomain);
ret = 0;
} else {
I think this should not be changed at all.
...
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 393b67b..54ba63a 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6644,6 +6644,7 @@ static const vshCmdInfo info_migrate[] = {
static const vshCmdOptDef opts_migrate[] = {
{"live", VSH_OT_BOOL, 0, N_("live migration")},
+ {"offline", VSH_OT_BOOL, 0, N_("offline (domain's inactive)
migration")},
{"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")},
{"direct", VSH_OT_BOOL, 0, N_("direct migration")},
{"tunneled", VSH_OT_ALIAS, 0, "tunnelled"},
@@ -6729,6 +6730,15 @@ doMigrate(void *opaque)
if (vshCommandOptBool(cmd, "unsafe"))
flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) {
+ flags |= VIR_MIGRATE_OFFLINE;
+ }
+
+ if (virDomainIsActive(dom) && (flags & VIR_MIGRATE_OFFLINE)) {
+ vshError(ctl, "%s", _("domain is active, offline migration for inactive domain only"));
+ goto out;
+ }
+
Another thing I didn't notice last time :-( Is there any reason why offline
migrating a running domain should be forbidden? But even if there was a
reason, this check doesn't belong in virsh.
And the --offline flag for virsh migrate should also be documented in the
virsh man page.
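Something like the following in tools/virsh.pod would do (the wording is
only a suggestion):

    Providing I<--offline> migrates the domain definition without
    starting the domain on the destination host.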
Jirka