[libvirt] [PATCH v15] support offline migration

The original migration code was not aware of the offline case, so add support
for offline migration without disturbing the existing flow: the
VIR_MIGRATE_OFFLINE flag is passed down to the migration APIs only when the
domain really is inactive, so the migration process is not confused by an
offline domain and does not exit unexpectedly. These changes do not handle the
disk images the domain requires, since those can be transferred by other APIs
as suggested; therefore VIR_MIGRATE_OFFLINE must not be combined with
VIR_MIGRATE_NON_SHARED_*. A persistent migration must be requested at the same
time, i.e. "virsh migrate --offline --persistent ...".

Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
---
 include/libvirt/libvirt.h.in |    1 +
 src/libvirt.c                |    4 +
 src/qemu/qemu_driver.c       |   16 +++---
 src/qemu/qemu_migration.c    |  140 +++++++++++++++++++++++++++---------------
 src/qemu/qemu_migration.h    |    9 ++-
 tools/virsh-domain.c         |    5 ++
 tools/virsh.pod              |    5 +-
 7 files changed, 117 insertions(+), 63 deletions(-)

diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 49a361a..ea625b3 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -1092,6 +1092,7 @@ typedef enum {
                                                * whole migration process; this will be used automatically
                                                * when supported */
     VIR_MIGRATE_UNSAFE            = (1 << 9), /* force migration even if it is considered unsafe */
+    VIR_MIGRATE_OFFLINE           = (1 << 10), /* offline migrate */
 } virDomainMigrateFlags;
 
 /* Domain migration. */
diff --git a/src/libvirt.c b/src/libvirt.c
index bdb1dc6..6d749d9 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -4827,6 +4827,10 @@ virDomainMigrateVersion3(virDomainPtr domain,
     if (uri_out)
         uri = uri_out; /* Did domainMigratePrepare3 change URI? */
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        cancelled = 0;
+        goto finish;
+    }
     /* Perform the migration.  The driver isn't supposed to return
      * until the migration is complete. The src VM should remain
      * running, but in paused state until the destination can
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 595c452..1ba1665 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9625,7 +9625,7 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
 
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies in v2 */
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9685,7 +9685,7 @@ qemudDomainMigratePrepare2(virConnectPtr dconn,
     ret = qemuMigrationPrepareDirect(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies */
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9827,7 +9827,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
         asyncJob = QEMU_ASYNC_JOB_NONE;
     }
 
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -9836,9 +9836,9 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
     /* Check if there is any ejected media.
      * We don't want to require them on the destination.
      */
-
-    if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
-        goto endjob;
+    if (!(flags & VIR_MIGRATE_OFFLINE) &&
+        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
+            goto endjob;
 
     if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
                                    cookieout, cookieoutlen,
@@ -9922,7 +9922,7 @@ qemuDomainMigratePrepare3(virConnectPtr dconn,
                                       cookiein, cookieinlen,
                                       cookieout, cookieoutlen,
                                       uri_in, uri_out,
-                                      dname, dom_xml);
+                                      dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9967,7 +9967,7 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
     qemuDriverUnlock(driver);
 
 cleanup:
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index d52ec59..53171df 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1442,6 +1442,24 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
                                 QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+    }
+
     if (xmlin) {
         if (!(def = virDomainDefParseString(driver->caps, xmlin,
                                             QEMU_EXPECTED_VIRT_TYPES,
@@ -1499,7 +1517,8 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
                         const char *dname,
                         const char *dom_xml,
                         const char *migrateFrom,
-                        virStreamPtr st)
+                        virStreamPtr st,
+                        unsigned long flags)
 {
     virDomainDefPtr def = NULL;
     virDomainObjPtr vm = NULL;
@@ -1609,15 +1628,18 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
     /* Start the QEMU daemon, with the same command-line arguments plus
      * -incoming $migrateFrom
      */
-    if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
-                         VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
-                         VIR_QEMU_PROCESS_START_PAUSED |
-                         VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
-        virDomainAuditStart(vm, "migrated", false);
-        /* Note that we don't set an error here because qemuProcessStart
-         * should have already done that.
-         */
-        goto endjob;
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0],
+                             NULL, NULL,
+                             VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
+                             VIR_QEMU_PROCESS_START_PAUSED |
+                             VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
+            virDomainAuditStart(vm, "migrated", false);
+            /* Note that we don't set an error here because qemuProcessStart
+             * should have already done that.
+             */
+            goto endjob;
+        }
     }
 
     if (tunnel) {
@@ -1625,7 +1647,8 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
             virReportSystemError(errno, "%s",
                                  _("cannot pass pipe for tunnelled migration"));
             virDomainAuditStart(vm, "migrated", false);
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+            if (!(flags & VIR_MIGRATE_OFFLINE))
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
             goto endjob;
         }
         dataFD[1] = -1; /* 'st' owns the FD now & will close it */
@@ -1640,13 +1663,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
         VIR_DEBUG("Received no lockstate");
     }
 
-    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
-                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
-        /* We could tear down the whole guest here, but
-         * cookie data is (so far) non-critical, so that
-         * seems a little harsh. We'll just warn for now.
-         */
-        VIR_WARN("Unable to encode migration cookie");
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
+                                    QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
+            /* We could tear down the whole guest here, but
+             * cookie data is (so far) non-critical, so that
+             * seems a little harsh. We'll just warn for now.
+             */
+            VIR_WARN("Unable to encode migration cookie");
+        }
     }
 
     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
@@ -1708,7 +1733,8 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                            int *cookieoutlen,
                            virStreamPtr st,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     int ret;
 
@@ -1722,7 +1748,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
      */
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  "stdio", st);
+                                  "stdio", st, flags);
     return ret;
 }
 
@@ -1737,7 +1763,8 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
                            const char *uri_in,
                            char **uri_out,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     static int port = 0;
     int this_port;
@@ -1833,7 +1860,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
 
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  migrateFrom, NULL);
+                                  migrateFrom, NULL, flags);
 cleanup:
     VIR_FREE(hostname);
     if (ret != 0)
@@ -2675,7 +2702,9 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
                                uri, &uri_out, flags, dname, resource, dom_xml);
         qemuDomainObjExitRemoteWithDriver(driver, vm);
     }
+    VIR_FREE(dom_xml);
+
     if (ret == -1)
         goto cleanup;
 
@@ -2858,7 +2887,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
     }
 
     /* domain may have been stopped while we were talking to remote daemon */
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
         goto cleanup;
@@ -2921,7 +2950,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
 
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -3245,26 +3274,27 @@ qemuMigrationFinish(struct qemud_driver *driver,
      * object, but if no, clean up the empty qemu process.
      */
     if (retcode == 0) {
-        if (!virDomainObjIsActive(vm)) {
+        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
            goto endjob;
        }
 
-        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
-            event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_STOPPED,
-                                             VIR_DOMAIN_EVENT_STOPPED_FAILED);
-            goto endjob;
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_STOPPED,
+                                                 VIR_DOMAIN_EVENT_STOPPED_FAILED);
+                goto endjob;
+            }
+            if (mig->network)
+                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+                    VIR_WARN("unable to provide network data for relocation");
         }
 
-        if (mig->network)
-            if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
-                VIR_WARN("unable to provide network data for relocation");
-
         if (flags & VIR_MIGRATE_PERSIST_DEST) {
             virDomainDefPtr vmdef;
             if (vm->persistent)
@@ -3312,7 +3342,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
             event = NULL;
         }
 
-        if (!(flags & VIR_MIGRATE_PAUSED)) {
+        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
             /* run 'cont' on the destination, which allows migration on qemu
              * >= 0.10.6 to work properly. This isn't strictly necessary on
              * older qemu's, but it also doesn't hurt anything there
@@ -3350,20 +3380,26 @@ qemuMigrationFinish(struct qemud_driver *driver,
 
         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
 
-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_RESUMED,
-                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
-            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
-            if (event)
-                qemuDomainEventQueue(driver, event);
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
             event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_SUSPENDED,
-                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+                                             VIR_DOMAIN_EVENT_RESUMED,
+                                             VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+            if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
+                virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
+                                     VIR_DOMAIN_PAUSED_USER);
+                if (event)
+                    qemuDomainEventQueue(driver, event);
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+            }
         }
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-            VIR_WARN("Failed to save status on vm %s", vm->def->name);
-            goto endjob;
+
+        if (virDomainObjIsActive(vm)) {
+            if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+                VIR_WARN("Failed to save status on vm %s", vm->def->name);
+                goto endjob;
+            }
         }
 
         /* Guest is successfully running, so cancel previous auto destroy */
@@ -3430,6 +3466,9 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
         return -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     /* Did the migration go as planned?  If yes, kill off the
      * domain object, but if no, resume CPUs
      */
@@ -3465,6 +3504,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
         }
     }
 
+done:
     qemuMigrationCookieFree(mig);
     rv = 0;
 
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 7a2269a..f2dc5aa 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -36,7 +36,8 @@
      VIR_MIGRATE_NON_SHARED_DISK |              \
      VIR_MIGRATE_NON_SHARED_INC |               \
      VIR_MIGRATE_CHANGE_PROTECTION |            \
-     VIR_MIGRATE_UNSAFE)
+     VIR_MIGRATE_UNSAFE |                       \
+     VIR_MIGRATE_OFFLINE)
 
 enum qemuMigrationJobPhase {
     QEMU_MIGRATION_PHASE_NONE = 0,
@@ -97,7 +98,8 @@ int qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                                int *cookieoutlen,
                                virStreamPtr st,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);
 
 int qemuMigrationPrepareDirect(struct qemud_driver *driver,
                                virConnectPtr dconn,
@@ -108,7 +110,8 @@ int qemuMigrationPrepareDirect(struct qemud_driver *driver,
                                const char *uri_in,
                                char **uri_out,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);
 
 int qemuMigrationPerform(struct qemud_driver *driver,
                          virConnectPtr conn,
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index cc47383..5d18bdf 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6661,6 +6661,7 @@ static const vshCmdInfo info_migrate[] = {
 
 static const vshCmdOptDef opts_migrate[] = {
     {"live", VSH_OT_BOOL, 0, N_("live migration")},
+    {"offline", VSH_OT_BOOL, 0, N_("offline (domain's inactive) migration")},
     {"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")},
     {"direct", VSH_OT_BOOL, 0, N_("direct migration")},
     {"tunneled", VSH_OT_ALIAS, 0, "tunnelled"},
@@ -6746,6 +6747,10 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "unsafe"))
         flags |= VIR_MIGRATE_UNSAFE;
 
+    if (vshCommandOptBool(cmd, "offline")) {
+        flags |= VIR_MIGRATE_OFFLINE;
+    }
+
     if (xmlfile &&
         virFileReadAll(xmlfile, 8192, &xml) < 0) {
         vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
diff --git a/tools/virsh.pod b/tools/virsh.pod
index 29be39e..b3ef64e 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -1026,13 +1026,14 @@ I<--total> for only the total stats, I<start> for only the per-cpu
 stats of the CPUs from I<start>, I<count> for only I<count> CPUs'
 stats.
 
-=item B<migrate> [I<--live>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
+=item B<migrate> [I<--live>] [I<--offline>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
 [I<--persistent>] [I<--undefinesource>] [I<--suspend>] [I<--copy-storage-all>]
 [I<--copy-storage-inc>] [I<--change-protection>] [I<--unsafe>] [I<--verbose>]
 I<domain> I<desturi> [I<migrateuri>] [I<dname>]
 [I<--timeout> B<seconds>] [I<--xml> B<file>]
 
-Migrate domain to another host.  Add I<--live> for live migration; I<--p2p>
+Migrate domain to another host.  Add I<--live> for live migration;
+I<--offline> for offline (domain's inactive) migration; I<--p2p>
 for peer-2-peer migration; I<--direct> for direct migration; or I<--tunnelled>
 for tunnelled migration.  I<--persistent> leaves the domain persistent on
 destination host, I<--undefinesource> undefines the domain on the source host,
--
1.7.1

ping ...

On Wed, 2012-11-21 at 16:28 +0800, liguang wrote: ...

Hi, Jirka

waiting for your comment ...

On Wed, 2012-11-21 at 16:28 +0800, liguang wrote: ...

Hi, Jirka

Please ... Thanks!

On Tue, 2012-11-27 at 09:22 +0100, Jiri Denemark wrote:
On Tue, Nov 27, 2012 at 08:45:24 +0800, li guang wrote:
Hi, Jirka
waiting for your comment ...
Hi,
Don't worry, I returned from vacation yesterday and I'll get to your patch sometime this week.
Jirka
--
regards!
li guang

On Wed, Nov 21, 2012 at 16:28:49 +0800, liguang wrote: ...
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index d52ec59..53171df 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2675,7 +2702,9 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
                                uri, &uri_out, flags, dname, resource, dom_xml);
         qemuDomainObjExitRemoteWithDriver(driver, vm);
     }
+    VIR_FREE(dom_xml);
+
     if (ret == -1)
         goto cleanup;

I wonder why you keep changing the code that I agreed with in the previous
version. A similar thing happened from v13 to v14. This change would break
p2p migration (which was handled correctly in v14).

Anyway, I combined the good pieces of code from v13, v14, and v15, added some
fixes, and added code to give reasonable error messages when the libvirt
client and the source and destination libvirt daemons do not all come from
the same release. The following is the diff against your patch, which may
serve as my review comments. I'll send a combined v16 patch shortly.

diff --git a/src/libvirt.c b/src/libvirt.c
index 6144a17..f48ae53 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -4830,9 +4830,13 @@ virDomainMigrateVersion3(virDomainPtr domain,
         uri = uri_out; /* Did domainMigratePrepare3 change URI? */
 
     if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
         cancelled = 0;
         goto finish;
     }
+
     /* Perform the migration.  The driver isn't supposed to return
      * until the migration is complete. The src VM should remain
      * running, but in paused state until the destination can
@@ -5203,6 +5207,23 @@ virDomainMigrate(virDomainPtr domain,
         goto error;
     }
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5408,6 +5429,23 @@ virDomainMigrate2(virDomainPtr domain,
         goto error;
     }
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5585,6 +5623,15 @@ virDomainMigrateToURI(virDomainPtr domain,
 
     virCheckNonNullArgGoto(duri, error);
 
+    if (flags & VIR_MIGRATE_OFFLINE &&
+        !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                  VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+        virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                        _("offline migration is not supported by "
+                          "the source host"));
+        goto error;
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
diff --git a/src/libvirt_internal.h b/src/libvirt_internal.h
index 2eda156..595d2db 100644
--- a/src/libvirt_internal.h
+++ b/src/libvirt_internal.h
@@ -105,6 +105,11 @@ enum {
      * Support for VIR_DOMAIN_XML_MIGRATABLE flag in domainGetXMLDesc
      */
     VIR_DRV_FEATURE_XML_MIGRATABLE = 11,
+
+    /*
+     * Support for offline migration.
+     */
+    VIR_DRV_FEATURE_MIGRATION_OFFLINE = 12,
 };
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 12ca3d2..d449579 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1208,6 +1208,7 @@ qemuSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
     case VIR_DRV_FEATURE_FD_PASSING:
     case VIR_DRV_FEATURE_TYPED_PARAM_STRING:
     case VIR_DRV_FEATURE_XML_MIGRATABLE:
+    case VIR_DRV_FEATURE_MIGRATION_OFFLINE:
         return 1;
     default:
         return 0;
@@ -9911,7 +9912,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
      */
     if (!(flags & VIR_MIGRATE_OFFLINE) &&
         qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
-            goto endjob;
+        goto endjob;
 
     if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
                                    cookieout, cookieoutlen,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 95ff392..0ca7dd4 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1445,19 +1445,23 @@ char *qemuMigrationBegin(virQEMUDriverPtr driver,
     if (flags & VIR_MIGRATE_OFFLINE) {
         if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                      VIR_MIGRATE_NON_SHARED_INC)) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           "%s",
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("offline migration cannot handle "
                              "non-shared storage"));
             goto cleanup;
         }
         if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           "%s",
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("offline migration must be specified with "
                              "the persistent flag set"));
             goto cleanup;
         }
+        if (flags & VIR_MIGRATE_TUNNELLED) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
     }
 
     if (xmlin) {
@@ -1531,10 +1535,33 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     bool tunnel = !!st;
     char *origname = NULL;
     char *xmlout = NULL;
+    unsigned int cookieFlags;
 
     if (virTimeMillisNow(&now) < 0)
         return -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+        if (tunnel) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
+    }
+
     if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                         QEMU_EXPECTED_VIRT_TYPES,
                                         VIR_DOMAIN_XML_INACTIVE)))
@@ -1618,6 +1645,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Domain starts inactive, even if the domain XML had an id field. */
     vm->def->id = -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     if (tunnel &&
         (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
         virReportSystemError(errno, "%s",
@@ -1628,18 +1658,15 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Start the QEMU daemon, with the same command-line arguments plus
      * -incoming $migrateFrom
      */
-    if (!(flags & VIR_MIGRATE_OFFLINE)) {
-        if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0],
-                             NULL, NULL,
-                             VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
-                             VIR_QEMU_PROCESS_START_PAUSED |
-                             VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
-            virDomainAuditStart(vm, "migrated", false);
-            /* Note that we don't set an error here because qemuProcessStart
-             * should have already done that.
-             */
-            goto endjob;
-        }
+    if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
+                         VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
+                         VIR_QEMU_PROCESS_START_PAUSED |
+                         VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
+        virDomainAuditStart(vm, "migrated", false);
+        /* Note that we don't set an error here because qemuProcessStart
+         * should have already done that.
+         */
+        goto endjob;
     }
 
     if (tunnel) {
@@ -1647,8 +1674,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
             virReportSystemError(errno, "%s",
                                  _("cannot pass pipe for tunnelled migration"));
             virDomainAuditStart(vm, "migrated", false);
-            if (!(flags & VIR_MIGRATE_OFFLINE))
-                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
             goto endjob;
         }
         dataFD[1] = -1; /* 'st' owns the FD now & will close it */
@@ -1663,24 +1689,30 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
         VIR_DEBUG("Received no lockstate");
     }
 
-    if (!(flags & VIR_MIGRATE_OFFLINE)) {
-        if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
-                                    QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
-            /* We could tear down the whole guest here, but
-             * cookie data is (so far) non-critical, so that
-             * seems a little harsh. We'll just warn for now.
-             */
-            VIR_WARN("Unable to encode migration cookie");
-        }
+done:
+    if (flags & VIR_MIGRATE_OFFLINE)
+        cookieFlags = 0;
+    else
+        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;
+
+    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
+                                cookieFlags) < 0) {
+        /* We could tear down the whole guest here, but
+         * cookie data is (so far) non-critical, so that
+         * seems a little harsh. We'll just warn for now.
+         */
+        VIR_WARN("Unable to encode migration cookie");
     }
 
     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
         goto endjob;
 
-    virDomainAuditStart(vm, "migrated", true);
-    event = virDomainEventNewFromObj(vm,
-                                     VIR_DOMAIN_EVENT_STARTED,
-                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        virDomainAuditStart(vm, "migrated", true);
+        event = virDomainEventNewFromObj(vm,
+                                         VIR_DOMAIN_EVENT_STARTED,
+                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    }
 
     /* We keep the job active across API calls until the finish() call.
      * This prevents any other APIs being invoked while incoming
@@ -2702,12 +2734,18 @@ static int doPeer2PeerMigrate3(virQEMUDriverPtr driver,
                                uri, &uri_out, flags, dname, resource, dom_xml);
         qemuDomainObjExitRemoteWithDriver(driver, vm);
     }
-    VIR_FREE(dom_xml);
-
     if (ret == -1)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
+        cancelled = 0;
+        goto finish;
+    }
+
     if (!(flags & VIR_MIGRATE_TUNNELLED) &&
         (uri_out == NULL)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -2846,6 +2884,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
     virConnectPtr dconn = NULL;
     bool p2p;
     virErrorPtr orig_err = NULL;
+    bool offline;
 
     VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
               "uri=%s, flags=%lx, dname=%s, resource=%lu",
@@ -2878,6 +2917,9 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
      */
     *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_V3);
+    if (flags & VIR_MIGRATE_OFFLINE)
+        offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                           VIR_DRV_FEATURE_MIGRATION_OFFLINE);
     qemuDomainObjExitRemoteWithDriver(driver, vm);
 
     if (!p2p) {
@@ -2886,6 +2928,13 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
         goto cleanup;
     }
 
+    if (flags & VIR_MIGRATE_OFFLINE && !offline) {
+        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                       _("offline migration is not supported by "
+                         "the destination host"));
+        goto cleanup;
+    }
+
     /* domain may have been stopped while we were talking to remote daemon */
     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -3320,9 +3369,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * to restart during confirm() step, so we kill it off now.
              */
             if (v3proto) {
-                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                                VIR_QEMU_PROCESS_STOP_MIGRATED);
-                virDomainAuditStop(vm, "failed");
+                if (!(flags & VIR_MIGRATE_OFFLINE)) {
+                    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                    VIR_QEMU_PROCESS_STOP_MIGRATED);
+                    virDomainAuditStop(vm, "failed");
+                }
                 if (newVM)
                     vm->persistent = 0;
             }
@@ -3395,16 +3446,15 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             }
         }
 
-        if (virDomainObjIsActive(vm)) {
-            if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-                VIR_WARN("Failed to save status on vm %s", vm->def->name);
-                goto endjob;
-            }
+        if (virDomainObjIsActive(vm) &&
+            virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+            VIR_WARN("Failed to save status on vm %s", vm->def->name);
+            goto endjob;
         }
 
         /* Guest is successfully running, so cancel previous auto destroy */
         qemuProcessAutoDestroyRemove(driver, vm);
-    } else {
+    } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "failed");

On Fri, 2012-12-07 at 16:01 +0100, Jiri Denemark wrote:
On Wed, Nov 21, 2012 at 16:28:49 +0800, liguang wrote: ...
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index d52ec59..53171df 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2675,7 +2702,9 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
                              uri, &uri_out, flags, dname, resource, dom_xml);
         qemuDomainObjExitRemoteWithDriver(driver, vm);
     }
+    VIR_FREE(dom_xml);
+
     if (ret == -1)
         goto cleanup;
I wonder why you keep changing code that I agreed with in the previous version; a similar thing happened from v13 to v14. This change would break p2p migration (which was handled correctly in v14).
Anyway, I combined the good pieces of code from v13, v14, and v15, added some fixes, and added code to give reasonable error messages when the libvirt client and the source and destination libvirt daemons do not all come from the same release.
The following is the diff to your patch, which may serve as my review comments. I'll send a combined v16 patch shortly.
Thank you so much! You cut my changes for p2p & tunnel, but that's fine.
diff --git a/src/libvirt.c b/src/libvirt.c
index 6144a17..f48ae53 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -4830,9 +4830,13 @@ virDomainMigrateVersion3(virDomainPtr domain,
         uri = uri_out; /* Did domainMigratePrepare3 change URI? */

     if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
         cancelled = 0;
         goto finish;
     }
+
     /* Perform the migration. The driver isn't supposed to return
      * until the migration is complete. The src VM should remain
      * running, but in paused state until the destination can
@@ -5203,6 +5207,23 @@ virDomainMigrate(virDomainPtr domain,
         goto error;
     }

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5408,6 +5429,23 @@ virDomainMigrate2(virDomainPtr domain,
         goto error;
     }

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5585,6 +5623,15 @@ virDomainMigrateToURI(virDomainPtr domain,

     virCheckNonNullArgGoto(duri, error);

+    if (flags & VIR_MIGRATE_OFFLINE &&
+        !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                  VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+        virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                        _("offline migration is not supported by "
+                          "the source host"));
+        goto error;
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
diff --git a/src/libvirt_internal.h b/src/libvirt_internal.h
index 2eda156..595d2db 100644
--- a/src/libvirt_internal.h
+++ b/src/libvirt_internal.h
@@ -105,6 +105,11 @@ enum {
      * Support for VIR_DOMAIN_XML_MIGRATABLE flag in domainGetXMLDesc
      */
     VIR_DRV_FEATURE_XML_MIGRATABLE = 11,
+
+    /*
+     * Support for offline migration.
+     */
+    VIR_DRV_FEATURE_MIGRATION_OFFLINE = 12,
 };
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 12ca3d2..d449579 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1208,6 +1208,7 @@ qemuSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
     case VIR_DRV_FEATURE_FD_PASSING:
     case VIR_DRV_FEATURE_TYPED_PARAM_STRING:
     case VIR_DRV_FEATURE_XML_MIGRATABLE:
+    case VIR_DRV_FEATURE_MIGRATION_OFFLINE:
         return 1;
     default:
         return 0;
@@ -9911,7 +9912,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
      */
     if (!(flags & VIR_MIGRATE_OFFLINE) &&
         qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
-            goto endjob;
+        goto endjob;

     if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
                                    cookieout, cookieoutlen,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 95ff392..0ca7dd4 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1445,19 +1445,23 @@ char *qemuMigrationBegin(virQEMUDriverPtr driver,
     if (flags & VIR_MIGRATE_OFFLINE) {
         if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                      VIR_MIGRATE_NON_SHARED_INC)) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           "%s",
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("offline migration cannot handle "
                              "non-shared storage"));
             goto cleanup;
         }
         if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           "%s",
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("offline migration must be specified with "
                              "the persistent flag set"));
             goto cleanup;
         }
+        if (flags & VIR_MIGRATE_TUNNELLED) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
     }

     if (xmlin) {
@@ -1531,10 +1535,33 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     bool tunnel = !!st;
     char *origname = NULL;
     char *xmlout = NULL;
+    unsigned int cookieFlags;

     if (virTimeMillisNow(&now) < 0)
         return -1;

+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+        if (tunnel) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
+    }
+
     if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                         QEMU_EXPECTED_VIRT_TYPES,
                                         VIR_DOMAIN_XML_INACTIVE)))
@@ -1618,6 +1645,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Domain starts inactive, even if the domain XML had an id field. */
     vm->def->id = -1;
+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     if (tunnel &&
         (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
         virReportSystemError(errno, "%s",
@@ -1628,18 +1658,15 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Start the QEMU daemon, with the same command-line arguments plus
      * -incoming $migrateFrom
      */
-    if (!(flags & VIR_MIGRATE_OFFLINE)) {
-        if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0],
-                             NULL, NULL,
-                             VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
-                             VIR_QEMU_PROCESS_START_PAUSED |
-                             VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
-            virDomainAuditStart(vm, "migrated", false);
-            /* Note that we don't set an error here because qemuProcessStart
-             * should have already done that.
-             */
-            goto endjob;
-        }
+    if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
+                         VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
+                         VIR_QEMU_PROCESS_START_PAUSED |
+                         VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
+        virDomainAuditStart(vm, "migrated", false);
+        /* Note that we don't set an error here because qemuProcessStart
+         * should have already done that.
+         */
+        goto endjob;
     }
     if (tunnel) {
@@ -1647,8 +1674,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
             virReportSystemError(errno, "%s",
                                  _("cannot pass pipe for tunnelled migration"));
             virDomainAuditStart(vm, "migrated", false);
-            if (!(flags & VIR_MIGRATE_OFFLINE))
-                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
             goto endjob;
         }
         dataFD[1] = -1; /* 'st' owns the FD now & will close it */
@@ -1663,24 +1689,30 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
         VIR_DEBUG("Received no lockstate");
     }

-    if (!(flags & VIR_MIGRATE_OFFLINE)) {
-        if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
-                                    QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
-            /* We could tear down the whole guest here, but
-             * cookie data is (so far) non-critical, so that
-             * seems a little harsh. We'll just warn for now.
-             */
-            VIR_WARN("Unable to encode migration cookie");
-        }
+done:
+    if (flags & VIR_MIGRATE_OFFLINE)
+        cookieFlags = 0;
+    else
+        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;
+
+    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
+                                cookieFlags) < 0) {
+        /* We could tear down the whole guest here, but
+         * cookie data is (so far) non-critical, so that
+         * seems a little harsh. We'll just warn for now.
+         */
+        VIR_WARN("Unable to encode migration cookie");
     }

     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
         goto endjob;

-    virDomainAuditStart(vm, "migrated", true);
-    event = virDomainEventNewFromObj(vm,
-                                     VIR_DOMAIN_EVENT_STARTED,
-                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        virDomainAuditStart(vm, "migrated", true);
+        event = virDomainEventNewFromObj(vm,
+                                         VIR_DOMAIN_EVENT_STARTED,
+                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    }

     /* We keep the job active across API calls until the finish() call.
      * This prevents any other APIs being invoked while incoming
@@ -2702,12 +2734,18 @@ static int doPeer2PeerMigrate3(virQEMUDriverPtr driver,
                              uri, &uri_out, flags, dname, resource, dom_xml);
         qemuDomainObjExitRemoteWithDriver(driver, vm);
     }
-    VIR_FREE(dom_xml);
-
     if (ret == -1)
         goto cleanup;
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
+        cancelled = 0;
+        goto finish;
+    }
+
     if (!(flags & VIR_MIGRATE_TUNNELLED) &&
         (uri_out == NULL)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -2846,6 +2884,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
     virConnectPtr dconn = NULL;
     bool p2p;
     virErrorPtr orig_err = NULL;
+    bool offline;

     VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
               "uri=%s, flags=%lx, dname=%s, resource=%lu",
@@ -2878,6 +2917,9 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
      */
     *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_V3);
+    if (flags & VIR_MIGRATE_OFFLINE)
+        offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                           VIR_DRV_FEATURE_MIGRATION_OFFLINE);
     qemuDomainObjExitRemoteWithDriver(driver, vm);

     if (!p2p) {
@@ -2886,6 +2928,13 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
         goto cleanup;
     }

+    if (flags & VIR_MIGRATE_OFFLINE && !offline) {
+        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                       _("offline migration is not supported by "
+                         "the destination host"));
+        goto cleanup;
+    }
+
     /* domain may have been stopped while we were talking to remote daemon */
     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -3320,9 +3369,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
          * to restart during confirm() step, so we kill it off now.
          */
         if (v3proto) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
+            if (!(flags & VIR_MIGRATE_OFFLINE)) {
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
+            }
             if (newVM)
                 vm->persistent = 0;
         }
@@ -3395,16 +3446,15 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             }
         }

-        if (virDomainObjIsActive(vm)) {
-            if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
-                VIR_WARN("Failed to save status on vm %s", vm->def->name);
-                goto endjob;
-            }
+        if (virDomainObjIsActive(vm) &&
+            virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+            VIR_WARN("Failed to save status on vm %s", vm->def->name);
+            goto endjob;
         }

         /* Guest is successfully running, so cancel previous auto destroy */
         qemuProcessAutoDestroyRemove(driver, vm);
-    } else {
+    } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "failed");
--
regards!
li guang