Hi Daniel & Eric,
could you help review this patch again?
Thanks!
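
For reviewers' convenience, here is a minimal sketch of how a caller
might drive an offline migration through the public API once this flag
exists. The connection URIs, the domain name "guest1", and the use of
VIR_MIGRATE_PEER2PEER are placeholders and assumptions of mine, not
part of the patch; error handling is trimmed.

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom;

    if (!conn)
        return 1;

    if (!(dom = virDomainLookupByName(conn, "guest1"))) {
        virConnectClose(conn);
        return 1;
    }

    /* The domain must be inactive; VIR_MIGRATE_OFFLINE only carries the
     * domain definition to the target, never disk images or run-time
     * state, so it must not be mixed with VIR_MIGRATE_NON_SHARED_*. */
    if (virDomainMigrateToURI(dom, "qemu+ssh://dest.example.com/system",
                              VIR_MIGRATE_OFFLINE | VIR_MIGRATE_PEER2PEER,
                              NULL, 0) < 0)
        fprintf(stderr, "offline migration failed\n");

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}

With this patch the rough virsh equivalent would be
"virsh migrate --offline guest1 qemu+ssh://dest.example.com/system"
run against a shut-off domain.
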
On Mon, 2012-09-17 at 08:45 +0800, liguang wrote:
> The original migration code was not aware of the offline case, so
> add code to support offline migration quietly (without disturbing
> the existing live migration path): the VIR_MIGRATE_OFFLINE flag is
> passed to the migration APIs only when the domain is really
> inactive, so the migration process is not confused by an offline
> domain and does not exit unexpectedly.
> These changes do not take care of the disk images the domain
> requires; as suggested, those can be transferred by other APIs, so
> VIR_MIGRATE_OFFLINE must not be combined with
> VIR_MIGRATE_NON_SHARED_*.
> The result of such a migration is therefore only that the domain
> definition becomes available on the target side.
>
> Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
> ---
> include/libvirt/libvirt.h.in | 1 +
> src/qemu/qemu_driver.c | 15 ++++++++++++
> src/qemu/qemu_migration.c | 52 ++++++++++++++++++++++++++++++++++++-----
> src/qemu/qemu_migration.h | 3 +-
> tools/virsh-domain.c | 6 +++++
> 5 files changed, 69 insertions(+), 8 deletions(-)
>
> diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
> index cfe5047..77df2ab 100644
> --- a/include/libvirt/libvirt.h.in
> +++ b/include/libvirt/libvirt.h.in
> @@ -995,6 +995,7 @@ typedef enum {
> * whole migration process; this will be used automatically
> * when supported */
> VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */
> + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */
> } virDomainMigrateFlags;
>
> /* Domain migration. */
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index b12d9bc..2380ccc 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -9641,6 +9641,15 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
> }
>
> if (!virDomainObjIsActive(vm)) {
> + if (flags & VIR_MIGRATE_OFFLINE) {
> + if (flags & (VIR_MIGRATE_NON_SHARED_DISK|
> + VIR_MIGRATE_NON_SHARED_INC)) {
> + virReportError(VIR_ERR_OPERATION_INVALID,
> + "%s", _("migrating storage handled by volume APIs"));
> + goto endjob;
> + }
> + goto offline;
> + }
> virReportError(VIR_ERR_OPERATION_INVALID,
> "%s", _("domain is not running"));
> goto endjob;
> @@ -9653,6 +9662,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
> if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
> goto endjob;
>
> +offline:
> if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
> cookieout, cookieoutlen,
> flags)))
> @@ -9888,6 +9898,11 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
> goto cleanup;
> }
>
> + if (flags & VIR_MIGRATE_OFFLINE) {
> + ret = 0;
> + goto cleanup;
> + }
> +
> if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
> goto cleanup;
>
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index 1b21ef6..cb63264 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags {
> QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
> QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
> QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
> + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
>
> QEMU_MIGRATION_COOKIE_FLAG_LAST
> };
> @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags {
> VIR_ENUM_DECL(qemuMigrationCookieFlag);
> VIR_ENUM_IMPL(qemuMigrationCookieFlag,
> QEMU_MIGRATION_COOKIE_FLAG_LAST,
> - "graphics", "lockstate", "persistent");
> + "graphics", "lockstate", "persistent", "offline");
>
> enum qemuMigrationCookieFeatures {
> QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
> QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
> QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
> + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
> };
>
> typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
> @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
> virBufferAdjustIndent(buf, -2);
> }
>
> + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE)
> + virBufferAsprintf(buf, " <offline/>\n");
> +
> virBufferAddLit(buf, "</qemu-migration>\n");
> return 0;
> }
> @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
> VIR_FREE(nodes);
> }
>
> + if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) {
> + if (virXPathBoolean("count(./offline) > 0", ctxt))
> + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> + }
> +
> return 0;
>
> error:
> @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
> qemuMigrationCookieAddPersistent(mig, dom) < 0)
> return -1;
>
> + if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
> + }
> +
> if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
> return -1;
>
> @@ -1151,6 +1165,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
> QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
> goto cleanup;
>
> + if (flags & VIR_MIGRATE_OFFLINE) {
> + if (qemuMigrationBakeCookie(mig, driver, vm,
> + cookieout, cookieoutlen,
> + QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
> + goto cleanup;
> + }
> +
> if (xmlin) {
> if (!(def = virDomainDefParseString(driver->caps, xmlin,
> QEMU_EXPECTED_VIRT_TYPES,
> @@ -1314,6 +1335,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
> goto endjob;
> }
>
> + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
> + QEMU_MIGRATION_COOKIE_OFFLINE)))
> + return ret;
> +
> + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
> + ret = 0;
> + goto cleanup;
> + }
> +
> /* Start the QEMU daemon, with the same command-line arguments plus
> * -incoming $migrateFrom
> */
> @@ -1856,7 +1886,8 @@ qemuMigrationRun(struct qemud_driver *driver,
> virLockManagerPluginGetName(driver->lockManager));
> return -1;
> }
> -
> + if (flags & VIR_MIGRATE_OFFLINE)
> + return 0;
> if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
> QEMU_MIGRATION_COOKIE_GRAPHICS)))
> goto cleanup;
> @@ -2372,6 +2403,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
> qemuDomainObjExitRemoteWithDriver(driver, vm);
> }
> VIR_FREE(dom_xml);
> + if (flags & VIR_MIGRATE_OFFLINE)
> + goto cleanup;
> if (ret == -1)
> goto cleanup;
>
> @@ -2477,7 +2510,7 @@ finish:
> vm->def->name);
>
> cleanup:
> - if (ddomain) {
> + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) {
> virObjectUnref(ddomain);
> ret = 0;
> } else {
> @@ -2554,7 +2587,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
> }
>
> /* domain may have been stopped while we were talking to remote daemon */
> - if (!virDomainObjIsActive(vm)) {
> + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
> virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> _("guest unexpectedly quit"));
> goto cleanup;
> @@ -2617,7 +2650,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
> if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
> goto cleanup;
>
> - if (!virDomainObjIsActive(vm)) {
> + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
> virReportError(VIR_ERR_OPERATION_INVALID,
> "%s", _("domain is not running"));
> goto endjob;
> @@ -2941,6 +2974,8 @@ qemuMigrationFinish(struct qemud_driver *driver,
> */
> if (retcode == 0) {
> if (!virDomainObjIsActive(vm)) {
> + if (flags & VIR_MIGRATE_OFFLINE)
> + goto offline;
> virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> _("guest unexpectedly quit"));
> goto endjob;
> @@ -3038,7 +3073,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
> goto endjob;
> }
> }
> -
> + offline:
> dom = virGetDomain (dconn, vm->def->name, vm->def->uuid);
>
> event = virDomainEventNewFromObj(vm,
> @@ -3120,7 +3155,10 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
>
> if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
> return -1;
> -
> + if (flags & VIR_MIGRATE_OFFLINE) {
> + rv = 0;
> + goto cleanup;
> + }
> /* Did the migration go as planned? If yes, kill off the
> * domain object, but if no, resume CPUs
> */
> diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
> index 1740204..2bcaea0 100644
> --- a/src/qemu/qemu_migration.h
> +++ b/src/qemu/qemu_migration.h
> @@ -36,7 +36,8 @@
> VIR_MIGRATE_NON_SHARED_DISK | \
> VIR_MIGRATE_NON_SHARED_INC | \
> VIR_MIGRATE_CHANGE_PROTECTION | \
> - VIR_MIGRATE_UNSAFE)
> + VIR_MIGRATE_UNSAFE | \
> + VIR_MIGRATE_OFFLINE)
>
> enum qemuMigrationJobPhase {
> QEMU_MIGRATION_PHASE_NONE = 0,
> diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
> index 4684466..ec25043 100644
> --- a/tools/virsh-domain.c
> +++ b/tools/virsh-domain.c
> @@ -6525,6 +6525,7 @@ static const vshCmdOptDef opts_migrate[] = {
> {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")},
> {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")},
> {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")},
> + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")},
> {NULL, 0, 0, NULL}
> };
>
> @@ -6591,6 +6592,11 @@ doMigrate(void *opaque)
> if (vshCommandOptBool(cmd, "unsafe"))
> flags |= VIR_MIGRATE_UNSAFE;
>
> + if (vshCommandOptBool(cmd, "offline")) {
> + if (!virDomainIsActive(dom))
> + flags |= VIR_MIGRATE_OFFLINE;
> + }
> +
> if (xmlfile &&
> virFileReadAll(xmlfile, 8192, &xml) < 0) {
> vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
--
liguang lig.fnst@cn.fujitsu.com
FNST linux kernel team
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list