[libvirt] [PATCH v8][re-send] support offline migration

original migration did not aware of offline case so, add code to support offline migration quietly (did not disturb original migration) by pass VIR_MIGRATE_OFFLINE flag to migration APIs if only the domain is really inactive, and migration process will not puzzled by domain offline and exit unexpectedly. these changes did not take care of disk images the domain required, for them could be transferred by other APIs as suggested, then VIR_MIGRATE_OFFLINE should not combined with VIR_MIGRATE_NON_SHARED_*. so, this migration result is just make domain definition alive at target side. Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- include/libvirt/libvirt.h.in | 1 + src/qemu/qemu_driver.c | 15 ++++++++++++ src/qemu/qemu_migration.c | 52 ++++++++++++++++++++++++++++++++++++----- src/qemu/qemu_migration.h | 3 +- tools/virsh-domain.c | 6 +++++ 5 files changed, 69 insertions(+), 8 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index cfe5047..77df2ab 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */ } virDomainMigrateFlags; /* Domain migration. 
*/ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index b12d9bc..2380ccc 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -9641,6 +9641,15 @@ qemuDomainMigrateBegin3(virDomainPtr domain, } if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (flags & (VIR_MIGRATE_NON_SHARED_DISK| + VIR_MIGRATE_NON_SHARED_INC)) { + virReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("migrating storage handled by volume APIs")); + goto endjob; + } + goto offline; + } virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -9653,6 +9662,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain, if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0) goto endjob; +offline: if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname, cookieout, cookieoutlen, flags))) @@ -9888,6 +9898,11 @@ qemuDomainMigrateConfirm3(virDomainPtr domain, goto cleanup; } + if (flags & VIR_MIGRATE_OFFLINE) { + ret = 0; + goto cleanup; + } + if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) goto cleanup; diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 1b21ef6..cb63264 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, + QEMU_MIGRATION_COOKIE_FLAG_OFFLINE, QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate", "persistent"); + "graphics", "lockstate", "persistent", "offline"); enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << 
QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), + QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE), }; typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -439,6 +441,9 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver, virBufferAdjustIndent(buf, -2); } + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) + virBufferAsprintf(buf, " <offline/>\n"); + virBufferAddLit(buf, "</qemu-migration>\n"); return 0; } @@ -662,6 +667,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(nodes); } + if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) { + if (virXPathBoolean("count(./offline) > 0", ctxt)) + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + return 0; error: @@ -721,6 +731,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddPersistent(mig, dom) < 0) return -1; + if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE; + } + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig))) return -1; @@ -1151,6 +1165,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0) goto cleanup; + if (flags & VIR_MIGRATE_OFFLINE) { + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_OFFLINE) < 0) + goto cleanup; + } + if (xmlin) { if (!(def = virDomainDefParseString(driver->caps, xmlin, QEMU_EXPECTED_VIRT_TYPES, @@ -1314,6 +1335,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver, goto endjob; } + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_OFFLINE))) + return ret; + + if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) { + ret = 0; + goto cleanup; + } + /* Start the QEMU daemon, with the same command-line arguments plus * -incoming $migrateFrom */ @@ -1856,7 +1886,8 @@ qemuMigrationRun(struct qemud_driver *driver, virLockManagerPluginGetName(driver->lockManager)); return -1; } - + if (flags & VIR_MIGRATE_OFFLINE) + 
return 0; if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; @@ -2372,6 +2403,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, qemuDomainObjExitRemoteWithDriver(driver, vm); } VIR_FREE(dom_xml); + if (flags & VIR_MIGRATE_OFFLINE) + goto cleanup; if (ret == -1) goto cleanup; @@ -2477,7 +2510,7 @@ finish: vm->def->name); cleanup: - if (ddomain) { + if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) { virObjectUnref(ddomain); ret = 0; } else { @@ -2554,7 +2587,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, } /* domain may have been stopped while we were talking to remote daemon */ - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto cleanup; @@ -2617,7 +2650,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver, if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup; - if (!virDomainObjIsActive(vm)) { + if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", _("domain is not running")); goto endjob; @@ -2941,6 +2974,8 @@ qemuMigrationFinish(struct qemud_driver *driver, */ if (retcode == 0) { if (!virDomainObjIsActive(vm)) { + if (flags & VIR_MIGRATE_OFFLINE) + goto offline; virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); goto endjob; @@ -3038,7 +3073,7 @@ qemuMigrationFinish(struct qemud_driver *driver, goto endjob; } } - + offline: dom = virGetDomain (dconn, vm->def->name, vm->def->uuid); event = virDomainEventNewFromObj(vm, @@ -3120,7 +3155,10 @@ int qemuMigrationConfirm(struct qemud_driver *driver, if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; - + if (flags & VIR_MIGRATE_OFFLINE) { + rv = 0; + goto cleanup; + } /* Did the migration go as planned? 
If yes, kill off the * domain object, but if no, resume CPUs */ diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 1740204..2bcaea0 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -36,7 +36,8 @@ VIR_MIGRATE_NON_SHARED_DISK | \ VIR_MIGRATE_NON_SHARED_INC | \ VIR_MIGRATE_CHANGE_PROTECTION | \ - VIR_MIGRATE_UNSAFE) + VIR_MIGRATE_UNSAFE | \ + VIR_MIGRATE_OFFLINE) enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0, diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 4684466..ec25043 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6525,6 +6525,7 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("for offline migration")}, {NULL, 0, 0, NULL} }; @@ -6591,6 +6592,11 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE; + if (vshCommandOptBool(cmd, "offline")) { + if (!virDomainIsActive(dom)) + flags |= VIR_MIGRATE_OFFLINE; + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile); -- 1.7.2.5

On Thu, Sep 20, 2012 at 01:33:39PM +0800, liguang wrote:
The original migration code was not aware of the offline case, so this patch adds code to support offline migration quietly (without disturbing normal migration) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process is not confused by the domain being offline and does not exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. The result of this migration is simply that the domain definition is made available on the target side.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
I've tested this patch and it does not appear to work at all I have 2 hosts, avocado & mustard, and guest 'vm1' I want to migrate from avocado to mustard: # virsh -c avocado list --all Id Name State ---------------------------------------------------- - debian6-x86_64 shut off - f13x86_64 shut off - f16_x86_64 shut off - f16x86_64 shut off - f18x86_64 shut off - freebsd-8.2-x86_64 shut off - netbsd-5.1-x86_64 shut off - openbsd-4.9-x86_64 shut off - opensuse-11.4-x86_64 shut off - rhel5x86_64 shut off - rhel6x86_64 shut off - rhel6x86_64ga shut off - ubuntu-11.10-x86_64 shut off - vm1 shut off # virsh -c mustard list --all Id Name State ---------------------------------------------------- 2 instance-00000001 running - rhel6x86_64 shut off The guest is not running: # virsh -c avocado dominfo vm1 Id: - Name: vm1 UUID: c7b3edbd-edaf-9455-926a-d65c16db1800 OS Type: hvm State: shut off CPU(s): 1 Max memory: 219200 kB Used memory: 219136 kB Persistent: yes Autostart: disable Managed save: no Security model: selinux Security DOI: 0 So a normal migrate fails, which is good. 
# virsh -c avocado migrate vm1 mustard error: Requested operation is not valid: domain is not running Now I try your new offline migrate, which claims to succeed # virsh -c avocado migrate --offline vm1 mustard We should have rejected an attempt to use --offline, unless the user has also specified either --persistent, or --copy-storage-all Now I repeat using --persistent which succeeds: # virsh -c avocado migrate --persistent --offline vm1 mustard But it has not created the guest on the target: # virsh -c mustard list --all Id Name State ---------------------------------------------------- 2 instance-00000001 running - rhel6x86_64 shut off Also if I add --undefinesource it also succeeds # /home/berrange/src/virt/libvirt/tools/virsh -c avocado migrate --persistent --undefinesource --offline vm1 mustard But again has not created the guest on the target # virsh -c mustard list --all Id Name State ---------------------------------------------------- 2 instance-00000001 running - rhel6x86_64 shut off Now has it removed it on the source # virsh -c avocado list --all Id Name State ---------------------------------------------------- - debian6-x86_64 shut off - f13x86_64 shut off - f16_x86_64 shut off - f16x86_64 shut off - f18x86_64 shut off - freebsd-8.2-x86_64 shut off - netbsd-5.1-x86_64 shut off - openbsd-4.9-x86_64 shut off - opensuse-11.4-x86_64 shut off - rhel5x86_64 shut off - rhel6x86_64 shut off - rhel6x86_64ga shut off - ubuntu-11.10-x86_64 shut off - vm1 shut off Regards, Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

Hi, Daniel I did experience what you did, but when I prepared the disk images the domain required and then ran virsh start domain_name, it started normally. So there may be something wrong with virsh list, since the domain state cannot be listed. Did you try going further and starting the migrated domain? Please try it. Thanks! 在 2012-09-21五的 11:50 +0100,Daniel P. Berrange写道:
On Thu, Sep 20, 2012 at 01:33:39PM +0800, liguang wrote:
The original migration code was not aware of the offline case, so this patch adds code to support offline migration quietly (without disturbing normal migration) by passing the VIR_MIGRATE_OFFLINE flag to the migration APIs only when the domain is really inactive, so that the migration process is not confused by the domain being offline and does not exit unexpectedly. These changes do not take care of the disk images the domain requires, since those can be transferred by other APIs as suggested; therefore VIR_MIGRATE_OFFLINE should not be combined with VIR_MIGRATE_NON_SHARED_*. The result of this migration is simply that the domain definition is made available on the target side.
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
I've tested this patch and it does not appear to work at all
I have 2 hosts, avocado & mustard, and guest 'vm1' I want to migrate from avocado to mustard:
# virsh -c avocado list --all Id Name State ---------------------------------------------------- - debian6-x86_64 shut off - f13x86_64 shut off - f16_x86_64 shut off - f16x86_64 shut off - f18x86_64 shut off - freebsd-8.2-x86_64 shut off - netbsd-5.1-x86_64 shut off - openbsd-4.9-x86_64 shut off - opensuse-11.4-x86_64 shut off - rhel5x86_64 shut off - rhel6x86_64 shut off - rhel6x86_64ga shut off - ubuntu-11.10-x86_64 shut off - vm1 shut off
# virsh -c mustard list --all Id Name State ---------------------------------------------------- 2 instance-00000001 running - rhel6x86_64 shut off
The guest is not running:
# virsh -c avocado dominfo vm1 Id: - Name: vm1 UUID: c7b3edbd-edaf-9455-926a-d65c16db1800 OS Type: hvm State: shut off CPU(s): 1 Max memory: 219200 kB Used memory: 219136 kB Persistent: yes Autostart: disable Managed save: no Security model: selinux Security DOI: 0
So a normal migrate fails, which is good.
# virsh -c avocado migrate vm1 mustard error: Requested operation is not valid: domain is not running
Now I try your new offline migrate, which claims to succeed
# virsh -c avocado migrate --offline vm1 mustard
We should have rejected an attempt to use --offline, unless the user has also specified either --persistent, or --copy-storage-all
Now I repeat using --persistent which succeeds:
# virsh -c avocado migrate --persistent --offline vm1 mustard
But it has not created the guest on the target:
# virsh -c mustard list --all Id Name State ---------------------------------------------------- 2 instance-00000001 running - rhel6x86_64 shut off
Also if I add --undefinesource it also succeeds
# /home/berrange/src/virt/libvirt/tools/virsh -c avocado migrate --persistent --undefinesource --offline vm1 mustard
But again has not created the guest on the target
# virsh -c mustard list --all Id Name State ---------------------------------------------------- 2 instance-00000001 running - rhel6x86_64 shut off
Nor has it removed it on the source
# virsh -c avocado list --all Id Name State ---------------------------------------------------- - debian6-x86_64 shut off - f13x86_64 shut off - f16_x86_64 shut off - f16x86_64 shut off - f18x86_64 shut off - freebsd-8.2-x86_64 shut off - netbsd-5.1-x86_64 shut off - openbsd-4.9-x86_64 shut off - opensuse-11.4-x86_64 shut off - rhel5x86_64 shut off - rhel6x86_64 shut off - rhel6x86_64ga shut off - ubuntu-11.10-x86_64 shut off - vm1 shut off
Regards, Daniel
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team

在 2012-09-21五的 11:50 +0100,Daniel P. Berrange写道:
# virsh -c avocado dominfo vm1 Id: - Name: vm1 UUID: c7b3edbd-edaf-9455-926a-d65c16db1800 OS Type: hvm State: shut off CPU(s): 1 Max memory: 219200 kB Used memory: 219136 kB Persistent: yes Autostart: disable Managed save: no
Security model: selinux
I did not test security model, sorry for that.
Security DOI: 0
So a normal migrate fails, which is good.
# virsh -c avocado migrate vm1 mustard error: Requested operation is not valid: domain is not running
Now I try your new offline migrate, which claims to succeed
# virsh -c avocado migrate --offline vm1 mustard
Did these operations succeed? If not, maybe there is something wrong with the target mustard. For me, after updating libvirt on both the source and the target, virsh migrate --offline vm1 qemu+ssh://target/system always succeeds.
We should have rejected an attempt to use --offline, unless the user has also specified either --persistent, or --copy-storage-all
Regards, Daniel
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team
participants (3)
-
Daniel P. Berrange
-
li guang
-
liguang