On Fri, 2012-12-07 at 16:01 +0100, Jiri Denemark wrote:
On Wed, Nov 21, 2012 at 16:28:49 +0800, liguang wrote:
...
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index d52ec59..53171df 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -2675,7 +2702,9 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
> uri, &uri_out, flags, dname, resource, dom_xml);
> qemuDomainObjExitRemoteWithDriver(driver, vm);
> }
> +
> VIR_FREE(dom_xml);
> +
> if (ret == -1)
> goto cleanup;
>
I wonder why you keep changing the code that I agreed with in the previous
version. A similar thing happened from v13 to v14. This change would break p2p
migration (which was handled correctly in v14).
Anyway, I combined the good pieces of code from v13, v14, and v15, added some
fixes, and added code to give reasonable error messages when the libvirt client
and the source and destination libvirt daemons do not all come from the same
release.
The following is the diff to your patch, which may serve as my review comments.
I'll send a combined v16 patch shortly.
Thank you so much!
You cut my changes for p2p and tunnelled migration, but that's fine.
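For illustration only, here is a minimal, hypothetical client sketch (not part of either patch) of what the new checks buy a management application: it requests an offline migration through the public API with VIR_MIGRATE_OFFLINE | VIR_MIGRATE_PERSIST_DEST and relies on libvirt itself to reject the request when either side lacks VIR_DRV_FEATURE_MIGRATION_OFFLINE. The connection URIs and the domain name "guest" are placeholders.

#include <stdio.h>
#include <libvirt/libvirt.h>
#include <libvirt/virterror.h>

int main(void)
{
    /* Placeholder URIs and domain name; adjust for a real setup. */
    virConnectPtr src = virConnectOpen("qemu:///system");
    virConnectPtr dst = virConnectOpen("qemu+ssh://dest.example.com/system");
    virDomainPtr dom = NULL;
    virDomainPtr ddom = NULL;
    int ret = 1;

    if (!src || !dst)
        goto cleanup;

    if (!(dom = virDomainLookupByName(src, "guest")))
        goto cleanup;

    /* Offline migration requires the persistent flag, as enforced in
     * qemuMigrationBegin/qemuMigrationPrepareAny in the diff below. */
    ddom = virDomainMigrate(dom, dst,
                            VIR_MIGRATE_OFFLINE | VIR_MIGRATE_PERSIST_DEST,
                            NULL, NULL, 0);
    if (!ddom) {
        virErrorPtr err = virGetLastError();
        fprintf(stderr, "migration failed: %s\n",
                err && err->message ? err->message : "unknown error");
        goto cleanup;
    }
    ret = 0;

 cleanup:
    if (ddom)
        virDomainFree(ddom);
    if (dom)
        virDomainFree(dom);
    if (dst)
        virConnectClose(dst);
    if (src)
        virConnectClose(src);
    return ret;
}

When the releases do not match, the VIR_DRV_FEATURE_MIGRATION_OFFLINE probes added below in virDomainMigrate, virDomainMigrate2, virDomainMigrateToURI, and doPeer2PeerMigrate are what turn the mismatch into the explicit "offline migration is not supported by the source/destination host" errors instead of an obscure failure later on.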
diff --git a/src/libvirt.c b/src/libvirt.c
index 6144a17..f48ae53 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -4830,9 +4830,13 @@ virDomainMigrateVersion3(virDomainPtr domain,
uri = uri_out; /* Did domainMigratePrepare3 change URI? */
if (flags & VIR_MIGRATE_OFFLINE) {
+ VIR_DEBUG("Offline migration, skipping Perform phase");
+ VIR_FREE(cookieout);
+ cookieoutlen = 0;
cancelled = 0;
goto finish;
}
+
/* Perform the migration. The driver isn't supposed to return
* until the migration is complete. The src VM should remain
* running, but in paused state until the destination can
@@ -5203,6 +5207,23 @@ virDomainMigrate(virDomainPtr domain,
goto error;
}
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("offline migration is not supported by "
+ "the source host"));
+ goto error;
+ }
+ if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("offline migration is not supported by "
+ "the destination host"));
+ goto error;
+ }
+ }
+
if (flags & VIR_MIGRATE_PEER2PEER) {
if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5408,6 +5429,23 @@ virDomainMigrate2(virDomainPtr domain,
goto error;
}
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("offline migration is not supported by "
+ "the source host"));
+ goto error;
+ }
+ if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("offline migration is not supported by "
+ "the destination host"));
+ goto error;
+ }
+ }
+
if (flags & VIR_MIGRATE_PEER2PEER) {
if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5585,6 +5623,15 @@ virDomainMigrateToURI(virDomainPtr domain,
virCheckNonNullArgGoto(duri, error);
+ if (flags & VIR_MIGRATE_OFFLINE &&
+ !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+ virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("offline migration is not supported by "
+ "the source host"));
+ goto error;
+ }
+
if (flags & VIR_MIGRATE_PEER2PEER) {
if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
VIR_DRV_FEATURE_MIGRATION_P2P)) {
diff --git a/src/libvirt_internal.h b/src/libvirt_internal.h
index 2eda156..595d2db 100644
--- a/src/libvirt_internal.h
+++ b/src/libvirt_internal.h
@@ -105,6 +105,11 @@ enum {
* Support for VIR_DOMAIN_XML_MIGRATABLE flag in domainGetXMLDesc
*/
VIR_DRV_FEATURE_XML_MIGRATABLE = 11,
+
+ /*
+ * Support for offline migration.
+ */
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE = 12,
};
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 12ca3d2..d449579 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1208,6 +1208,7 @@ qemuSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
case VIR_DRV_FEATURE_FD_PASSING:
case VIR_DRV_FEATURE_TYPED_PARAM_STRING:
case VIR_DRV_FEATURE_XML_MIGRATABLE:
+ case VIR_DRV_FEATURE_MIGRATION_OFFLINE:
return 1;
default:
return 0;
@@ -9911,7 +9912,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
*/
if (!(flags & VIR_MIGRATE_OFFLINE) &&
qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
- goto endjob;
+ goto endjob;
if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
cookieout, cookieoutlen,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 95ff392..0ca7dd4 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1445,19 +1445,23 @@ char *qemuMigrationBegin(virQEMUDriverPtr driver,
if (flags & VIR_MIGRATE_OFFLINE) {
if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
VIR_MIGRATE_NON_SHARED_INC)) {
- virReportError(VIR_ERR_OPERATION_INVALID,
- "%s",
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("offline migration cannot handle "
"non-shared storage"));
goto cleanup;
}
if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
- virReportError(VIR_ERR_OPERATION_INVALID,
- "%s",
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("offline migration must be specified with "
"the persistent flag set"));
goto cleanup;
}
+ if (flags & VIR_MIGRATE_TUNNELLED) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("tunnelled offline migration does not "
+ "make sense"));
+ goto cleanup;
+ }
}
if (xmlin) {
@@ -1531,10 +1535,33 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
bool tunnel = !!st;
char *origname = NULL;
char *xmlout = NULL;
+ unsigned int cookieFlags;
if (virTimeMillisNow(&now) < 0)
return -1;
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC)) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("offline migration cannot handle "
+ "non-shared storage"));
+ goto cleanup;
+ }
+ if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("offline migration must be specified with "
+ "the persistent flag set"));
+ goto cleanup;
+ }
+ if (tunnel) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("tunnelled offline migration does not "
+ "make sense"));
+ goto cleanup;
+ }
+ }
+
if (!(def = virDomainDefParseString(driver->caps, dom_xml,
QEMU_EXPECTED_VIRT_TYPES,
VIR_DOMAIN_XML_INACTIVE)))
@@ -1618,6 +1645,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
/* Domain starts inactive, even if the domain XML had an id field. */
vm->def->id = -1;
+ if (flags & VIR_MIGRATE_OFFLINE)
+ goto done;
+
if (tunnel &&
(pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
virReportSystemError(errno, "%s",
@@ -1628,18 +1658,15 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
/* Start the QEMU daemon, with the same command-line arguments plus
* -incoming $migrateFrom
*/
- if (!(flags & VIR_MIGRATE_OFFLINE)) {
- if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0],
- NULL, NULL,
- VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
- VIR_QEMU_PROCESS_START_PAUSED |
- VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
- virDomainAuditStart(vm, "migrated", false);
- /* Note that we don't set an error here because qemuProcessStart
- * should have already done that.
- */
- goto endjob;
- }
+ if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
+ VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
+ VIR_QEMU_PROCESS_START_PAUSED |
+ VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
+ virDomainAuditStart(vm, "migrated", false);
+ /* Note that we don't set an error here because qemuProcessStart
+ * should have already done that.
+ */
+ goto endjob;
}
if (tunnel) {
@@ -1647,8 +1674,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
virReportSystemError(errno, "%s",
_("cannot pass pipe for tunnelled
migration"));
virDomainAuditStart(vm, "migrated", false);
- if (!(flags & VIR_MIGRATE_OFFLINE))
- qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
goto endjob;
}
dataFD[1] = -1; /* 'st' owns the FD now & will close it */
@@ -1663,24 +1689,30 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
VIR_DEBUG("Received no lockstate");
}
- if (!(flags & VIR_MIGRATE_OFFLINE)) {
- if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
- QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
- /* We could tear down the whole guest here, but
- * cookie data is (so far) non-critical, so that
- * seems a little harsh. We'll just warn for now.
- */
- VIR_WARN("Unable to encode migration cookie");
- }
+done:
+ if (flags & VIR_MIGRATE_OFFLINE)
+ cookieFlags = 0;
+ else
+ cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;
+
+ if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
+ cookieFlags) < 0) {
+ /* We could tear down the whole guest here, but
+ * cookie data is (so far) non-critical, so that
+ * seems a little harsh. We'll just warn for now.
+ */
+ VIR_WARN("Unable to encode migration cookie");
}
if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
goto endjob;
- virDomainAuditStart(vm, "migrated", true);
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_STARTED,
- VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+ if (!(flags & VIR_MIGRATE_OFFLINE)) {
+ virDomainAuditStart(vm, "migrated", true);
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STARTED,
+ VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+ }
/* We keep the job active across API calls until the finish() call.
* This prevents any other APIs being invoked while incoming
@@ -2702,12 +2734,18 @@ static int doPeer2PeerMigrate3(virQEMUDriverPtr driver,
uri, &uri_out, flags, dname, resource, dom_xml);
qemuDomainObjExitRemoteWithDriver(driver, vm);
}
-
VIR_FREE(dom_xml);
-
if (ret == -1)
goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ VIR_DEBUG("Offline migration, skipping Perform phase");
+ VIR_FREE(cookieout);
+ cookieoutlen = 0;
+ cancelled = 0;
+ goto finish;
+ }
+
if (!(flags & VIR_MIGRATE_TUNNELLED) &&
(uri_out == NULL)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -2846,6 +2884,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
virConnectPtr dconn = NULL;
bool p2p;
virErrorPtr orig_err = NULL;
+ bool offline;
VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
"uri=%s, flags=%lx, dname=%s, resource=%lu",
@@ -2878,6 +2917,9 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
*/
*v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
VIR_DRV_FEATURE_MIGRATION_V3);
+ if (flags & VIR_MIGRATE_OFFLINE)
+ offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+ VIR_DRV_FEATURE_MIGRATION_OFFLINE);
qemuDomainObjExitRemoteWithDriver(driver, vm);
if (!p2p) {
@@ -2886,6 +2928,13 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
goto cleanup;
}
+ if (flags & VIR_MIGRATE_OFFLINE && !offline) {
+ virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("offline migration is not supported by "
+ "the destination host"));
+ goto cleanup;
+ }
+
/* domain may have been stopped while we were talking to remote daemon */
if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -3320,9 +3369,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
* to restart during confirm() step, so we kill it off now.
*/
if (v3proto) {
- qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
- VIR_QEMU_PROCESS_STOP_MIGRATED);
- virDomainAuditStop(vm, "failed");
+ if (!(flags & VIR_MIGRATE_OFFLINE)) {
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+ VIR_QEMU_PROCESS_STOP_MIGRATED);
+ virDomainAuditStop(vm, "failed");
+ }
if (newVM)
vm->persistent = 0;
}
@@ -3395,16 +3446,15 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
}
}
- if (virDomainObjIsActive(vm)) {
- if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
- VIR_WARN("Failed to save status on vm %s",
vm->def->name);
- goto endjob;
- }
+ if (virDomainObjIsActive(vm) &&
+ virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+ VIR_WARN("Failed to save status on vm %s", vm->def->name);
+ goto endjob;
}
/* Guest is successfully running, so cancel previous auto destroy */
qemuProcessAutoDestroyRemove(driver, vm);
- } else {
+ } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
VIR_QEMU_PROCESS_STOP_MIGRATED);
virDomainAuditStop(vm, "failed");
--
regards!
li guang