[libvirt] [PATCH v11] support offline migration

original migration did not aware of offline case, so, try to support offline migration quietly (did not disturb original migration) by pass VIR_MIGRATE_OFFLINE flag to migration APIs if only the domain is really inactive, and migration process will not puzzled by domain offline and exit unexpectedly. these changes did not take care of disk images the domain required, for them could be transferred by other APIs as suggested, then VIR_MIGRATE_OFFLINE should not combined with VIR_MIGRATE_NON_SHARED_*. if you want a persistent migration, you should do "virsh migrate --persistent" youself. Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags; /* Domain migration. 
*/ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, } if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); + goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob; +offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index db69a0a..b2f921e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE, QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline"); enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << 
QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), }; typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); } + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); } + if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0; error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1; + if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1; @@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup; + if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; } + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + return 0; if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 
QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup; @@ -2494,7 +2527,7 @@ finish: vm->def->name); cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, } /* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup; - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; } + offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver, event = NULL; } - if (!(flags & VIR_MIGRATE_PAUSED)) { + if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. 
This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; + if (virDomainObjIsActive(vm)) { + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } } /* Guest is successfully running, so cancel previous auto destroy */ @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver, endjob: if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; + } else if (flags & VIR_MIGRATE_OFFLINE) { } else if (!vm->persistent && !virDomainObjIsActive(vm)) { qemuDomainRemoveInactive(driver, vm); vm = NULL; @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver, if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; /* Did the migration go as planned? 
If yes, kill off the * domain object, but if no, resume CPUs @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, } } +offline: qemuMigrationCookieFree(mig); rv = 0; diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 7a2269a..b4f6a77 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE) enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 505169b..2218379 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} }; @@ -6713,6 +6714,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE; + if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile); -- 1.7.2.5

Supports migrating a domain that is currently inactive. This patchset adds a new migration flag called VIR_MIGRATE_OFFLINE, which can only be used on domains that are inactive. Offline migration does not support copying non-shared storage in any case. --- src/qemu/qemu_driver.c | 29 ++++++++++++++++------------- src/qemu/qemu_migration.c | 5 ++++- tools/virsh-domain.c | 9 ++++++--- 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 6e3747f..f53bf3d 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9624,34 +9624,37 @@ qemuDomainMigrateBegin3(virDomainPtr domain, asyncJob = QEMU_ASYNC_JOB_NONE; } + /* Domain is not active */ if (!virDomainObjIsActive(vm)) { + /* Domain is not active and offline migration requested */ if (flags & VIR_MIGRATE_OFFLINE) { - if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) { - virReportError(VIR_ERR_OPERATION_INVALID, - "%s", _("migrating storage handled by volume APIs")); + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("offline migration cannot copy non-shared storage")); goto endjob; } if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { - virReportError(VIR_ERR_OPERATION_INVALID, - "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("offline migration must be specified with the persistent flag set")); goto endjob; } - goto offline; + } else { + /* Domain is not active and NO offline migration */ + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; } - virReportError(VIR_ERR_OPERATION_INVALID, - "%s", _("domain is not running")); - goto endjob; } /* Check if there is any ejected media. * We don't want to require them on the destination. 
*/ + if (virDomainObjIsActive(vm)) { + if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) + goto endjob; + } - if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) - goto endjob; - -offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index de9d55d..54e94a4 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1904,8 +1904,10 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } + if (flags & VIR_MIGRATE_OFFLINE) return 0; + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -3008,7 +3010,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; } - offline: +offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3176,6 +3178,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) goto offline; diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 4d5a242..8196ba6 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6644,6 +6644,7 @@ static const vshCmdInfo info_migrate[] = { static const vshCmdOptDef opts_migrate[] = { {"live", VSH_OT_BOOL, 0, N_("live migration")}, + {"offline", VSH_OT_BOOL, 0, N_("offline (inactive domain) migration")}, {"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")}, {"direct", VSH_OT_BOOL, 0, N_("direct migration")}, {"tunneled", VSH_OT_ALIAS, 0, "tunnelled"}, @@ -6663,7 +6664,6 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated 
XML for the target")}, - {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} }; @@ -6730,9 +6730,12 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE; - if (vshCommandOptBool(cmd, "offline")) { - if (!virDomainIsActive(dom)) + if (vshCommandOptBool(cmd, "offline")) flags |= VIR_MIGRATE_OFFLINE; + + if (virDomainIsActive(dom) && (flags & VIR_MIGRATE_OFFLINE)) { + vshError(ctl, "%s", _("unable to perform offline migration when the domain is active")); + goto out; } if (xmlfile && -- 1.7.8.6

On Thu, Oct 18, 2012 at 1:06 AM, Doug Goldstein <cardoe@cardoe.com> wrote:
Supports migrating a domain that is currently inactive. This patchset adds a new migration flag called VIR_MIGRATE_OFFLINE, which can only be used on domains that are inactive. Offline migration does not support copying non-shared storage in any case. ---
My git send-email foo failed me or Gmail's interface is failing me. This was really supposed to be in reply to "PATCH v11 support offline migration", https://www.redhat.com/archives/libvir-list/2012-October/msg00950.html I would recommend squashing this into that patch. -- Doug Goldstein

On Wed, Oct 17, 2012 at 7:42 PM, liguang <lig.fnst@cn.fujitsu.com> wrote:
The original migration code was not aware of the offline case, so this patch tries to support offline migration quietly (without disturbing the existing migration path) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process will not be confused by the domain being offline and exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. If you want a persistent migration, you should run "virsh migrate --persistent" yourself.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags;
/* Domain migration. */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST"));
I feel like maybe we should just assume that VIR_MIGRATE_OFFLINE implies VIR_MIGRATE_PERSIST_DEST and, if it's not supplied, add it to the flags. Dan, do you agree or disagree?
+ goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob;
+offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index db69a0a..b2f921e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline");
enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), };
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); }
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); }
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0;
error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1;
@@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; }
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + return 0;
This feels wrong, since we're checking the state with the locking manager. But really, since we're not dealing with the disks at all — the domain is being moved over offline — we shouldn't check the state; we should just migrate. So it seems this should be above the locking manager check. But again, I defer to Dan, as he's more knowledgeable about this than I am.
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup;
@@ -2494,7 +2527,7 @@ finish: vm->def->name);
cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, }
/* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup;
- if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; }
+ offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver, event = NULL; }
- if (!(flags & VIR_MIGRATE_PAUSED)) { + if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; + if (virDomainObjIsActive(vm)) { + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } }
/* Guest is successfully running, so cancel previous auto destroy */ @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver, endjob: if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; + } else if (flags & VIR_MIGRATE_OFFLINE) { } else if (!vm->persistent && !virDomainObjIsActive(vm)) { qemuDomainRemoveInactive(driver, vm); vm = NULL; @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) + goto offline;
/* Did the migration go as planned? If yes, kill off the * domain object, but if no, resume CPUs @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, } }
+offline: qemuMigrationCookieFree(mig); rv = 0;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 7a2269a..b4f6a77 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE)
enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 505169b..2218379 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} };
@@ -6713,6 +6714,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile); -- 1.7.2.5
-- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
-- Doug Goldstein

On Thu, Oct 18, 2012 at 01:11:19AM -0500, Doug Goldstein wrote:
On Wed, Oct 17, 2012 at 7:42 PM, liguang <lig.fnst@cn.fujitsu.com> wrote:
The original migration code was not aware of the offline case, so this patch tries to support offline migration quietly (without disturbing the existing migration path) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process will not be confused by the domain being offline and exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. If you want a persistent migration, you should run "virsh migrate --persistent" yourself.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags;
/* Domain migration. */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST"));
I feel like maybe we should just assume that VIR_MIGRATE_OFFLINE implies VIR_MIGRATE_PERSIST_DEST and, if it's not supplied, add it to the flags. Dan, do you agree or disagree?
I didn't want it to be implied. The rationale is that in the future we might add support for copying storage of offline domains, and we want to be able to request copying of storage, separately from copying the XML. This can only be done if you require explicit flags, without having stuff implied. Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

ping ... 在 2012-10-18四的 08:42 +0800,liguang写道:
The original migration code was not aware of the offline case, so this patch tries to support offline migration quietly (without disturbing the existing migration path) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process will not be confused by the domain being offline and exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. If you want a persistent migration, you should run "virsh migrate --persistent" yourself.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags;
/* Domain migration. */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); + goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob;
+offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index db69a0a..b2f921e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline");
enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), };
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); }
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); }
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0;
error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1;
@@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; }
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + return 0; if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup;
@@ -2494,7 +2527,7 @@ finish: vm->def->name);
cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, }
/* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup;
- if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; }
+ offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver, event = NULL; }
- if (!(flags & VIR_MIGRATE_PAUSED)) { + if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; + if (virDomainObjIsActive(vm)) { + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } }
/* Guest is successfully running, so cancel previous auto destroy */ @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver, endjob: if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; + } else if (flags & VIR_MIGRATE_OFFLINE) { } else if (!vm->persistent && !virDomainObjIsActive(vm)) { qemuDomainRemoveInactive(driver, vm); vm = NULL; @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) + goto offline;
/* Did the migration go as planned? If yes, kill off the * domain object, but if no, resume CPUs @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, } }
+offline: qemuMigrationCookieFree(mig); rv = 0;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 7a2269a..b4f6a77 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE)
enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 505169b..2218379 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} };
@@ -6713,6 +6714,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team

Hello, just a nit: On Tuesday 23 October 2012 10:09:11 li guang wrote:
+++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, } if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); ^ be" ? or even "should" -> "must be"?
Sincerely Philipp -- Philipp Hahn Open Source Software Engineer hahn@univention.de Univention GmbH be open. fon: +49 421 22 232- 0 Mary-Somerville-Str.1 D-28359 Bremen fax: +49 421 22 232-99 http://www.univention.de/

On Fri, Oct 26, 2012 at 11:59 AM, Philipp Hahn <hahn@univention.de> wrote:
Hello,
just a nit:
On Tuesday 23 October 2012 10:09:11 li guang wrote:
+++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); ^ be" ? or even "should" -> "must be"?
Sincerely Philipp --
One of the things that I addressed with my code review patch to be squashed into this patch. -- Doug Goldstein

ping ... 在 2012-10-23二的 16:09 +0800,li guang写道:
ping ...
在 2012-10-18四的 08:42 +0800,liguang写道:
The original migration code was not aware of the offline case, so try to support offline migration quietly (without disturbing the original migration path) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process is not confused by the domain being offline and does not exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. If you want a persistent migration, you should run "virsh migrate --persistent" yourself.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags;
/* Domain migration. */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); + goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob;
+offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index db69a0a..b2f921e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline");
enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), };
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); }
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); }
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0;
error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1;
@@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; }
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + return 0; if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup;
@@ -2494,7 +2527,7 @@ finish: vm->def->name);
cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, }
/* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup;
- if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; }
+ offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver, event = NULL; }
- if (!(flags & VIR_MIGRATE_PAUSED)) { + if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; + if (virDomainObjIsActive(vm)) { + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } }
/* Guest is successfully running, so cancel previous auto destroy */ @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver, endjob: if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; + } else if (flags & VIR_MIGRATE_OFFLINE) { } else if (!vm->persistent && !virDomainObjIsActive(vm)) { qemuDomainRemoveInactive(driver, vm); vm = NULL; @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) + goto offline;
/* Did the migration go as planned? If yes, kill off the * domain object, but if no, resume CPUs @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, } }
+offline: qemuMigrationCookieFree(mig); rv = 0;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 7a2269a..b4f6a77 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE)
enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 505169b..2218379 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} };
@@ -6713,6 +6714,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team

Unfortunately this patch conflicts horribly with this recently merged patch commit 2f3e2c0c434218a3d656c08779cb98b327170e11 Author: Kyle Mestery <kmestery@cisco.com> Date: Mon Oct 1 11:18:22 2012 -0400 qemu_migration: Transport OVS per-port data during live migration so has many failures when applying. Can you rebase it, and include the additional fixes from Doug's patch here: https://www.redhat.com/archives/libvir-list/2012-October/msg00957.html Regards, Daniel On Thu, Nov 01, 2012 at 01:47:00PM +0800, li guang wrote:
ping ...
在 2012-10-23二的 16:09 +0800,li guang写道:
ping ...
在 2012-10-18四的 08:42 +0800,liguang写道:
The original migration code was not aware of the offline case, so try to support offline migration quietly (without disturbing the original migration path) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process is not confused by the domain being offline and does not exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. If you want a persistent migration, you should run "virsh migrate --persistent" yourself.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags;
/* Domain migration. */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); + goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob;
+offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index db69a0a..b2f921e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline");
enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), };
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); }
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); }
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0;
error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1;
@@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; }
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + return 0; if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup;
@@ -2494,7 +2527,7 @@ finish: vm->def->name);
cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, }
/* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup;
- if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; }
+ offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver, event = NULL; }
- if (!(flags & VIR_MIGRATE_PAUSED)) { + if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; + if (virDomainObjIsActive(vm)) { + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } }
/* Guest is successfully running, so cancel previous auto destroy */ @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver, endjob: if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; + } else if (flags & VIR_MIGRATE_OFFLINE) { } else if (!vm->persistent && !virDomainObjIsActive(vm)) { qemuDomainRemoveInactive(driver, vm); vm = NULL; @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) + goto offline;
/* Did the migration go as planned? If yes, kill off the * domain object, but if no, resume CPUs @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, } }
+offline: qemuMigrationCookieFree(mig); rv = 0;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 7a2269a..b4f6a77 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE)
enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 505169b..2218379 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} };
@@ -6713,6 +6714,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team
-- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
-- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

Rebased. Two weeks have elapsed since my last patch, so it is not surprising that conflicts came up; could you please check this patch as soon as possible this time? 在 2012-11-01四的 11:59 +0000,Daniel P. Berrange写道:
Unfortunately this patch conflicts horribly with this recently merged patch
commit 2f3e2c0c434218a3d656c08779cb98b327170e11 Author: Kyle Mestery <kmestery@cisco.com> Date: Mon Oct 1 11:18:22 2012 -0400
qemu_migration: Transport OVS per-port data during live migration
so has many failures when applying. Can you rebase it, and include the additional fixes from Doug's patch here:
https://www.redhat.com/archives/libvir-list/2012-October/msg00957.html
Regards, Daniel
On Thu, Nov 01, 2012 at 01:47:00PM +0800, li guang wrote:
ping ...
在 2012-10-23二的 16:09 +0800,li guang写道:
ping ...
在 2012-10-18四的 08:42 +0800,liguang写道:
The original migration code was not aware of the offline case, so try to support offline migration quietly (without disturbing the original migration path) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process is not confused by the domain being offline and does not exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. If you want a persistent migration, you should run "virsh migrate --persistent" yourself.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++ src/qemu/qemu_migration.c | 60 +++++++++++++++++++++++++++++++++++------ src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 ++++ 5 files changed, 75 insertions(+), 10 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 81f12a4..1cebc21 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags;
/* Domain migration. */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 97ad23e..38bfcab 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9622,6 +9622,20 @@ qemuDomainMigrateBegin3(virDomainPtr domain, }
if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + if (!(flags & VIR_MIGRATE_PERSIST_DEST)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("VIR_MIGRATE_OFFLINE should combined with VIR_MIGRATE_PERSIST_DEST")); + goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9634,6 +9648,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob;
+offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index db69a0a..b2f921e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline");
enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), };
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); }
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); }
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0;
error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1;
@@ -1168,6 +1182,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1331,6 +1352,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; }
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1873,7 +1903,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + return 0; if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2389,6 +2420,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup;
@@ -2494,7 +2527,7 @@ finish: vm->def->name);
cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2571,7 +2604,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, }
/* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2634,7 +2667,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup;
- if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2958,6 +2991,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -2973,6 +3008,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; }
+ offline: if (flags & VIR_MIGRATE_PERSIST_DEST) { virDomainDefPtr vmdef; if (vm->persistent) @@ -3020,7 +3056,7 @@ qemuMigrationFinish(struct qemud_driver *driver, event = NULL; }
- if (!(flags & VIR_MIGRATE_PAUSED)) { + if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -3069,9 +3105,11 @@ qemuMigrationFinish(struct qemud_driver *driver, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } - if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { - VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; + if (virDomainObjIsActive(vm)) { + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } }
/* Guest is successfully running, so cancel previous auto destroy */ @@ -3091,6 +3129,7 @@ qemuMigrationFinish(struct qemud_driver *driver, endjob: if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; + } else if (flags & VIR_MIGRATE_OFFLINE) { } else if (!vm->persistent && !virDomainObjIsActive(vm)) { qemuDomainRemoveInactive(driver, vm); vm = NULL; @@ -3137,6 +3176,8 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; + if (flags & VIR_MIGRATE_OFFLINE) + goto offline;
/* Did the migration go as planned? If yes, kill off the * domain object, but if no, resume CPUs @@ -3173,6 +3214,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver, } }
+offline: qemuMigrationCookieFree(mig); rv = 0;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 7a2269a..b4f6a77 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE)
enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 505169b..2218379 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6647,6 +6647,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} };
@@ -6713,6 +6714,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team
-- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team
participants (6)
-
Daniel P. Berrange
-
Doug Goldstein
-
Doug Goldstein
-
li guang
-
liguang
-
Philipp Hahn