[libvirt] [PATCH 00/16 v4] Implement migration v3 protocol

This is an update to http://www.redhat.com/archives/libvir-list/2011-April/msg01012.html

In this update:

 - Bug fixes from previous review
 - Rebase on top of recent changes
 - Run tunnelled migration I/O loop in a background thread to allow use of the normal qemuMigrationWaitForCompletion code

Migration just seems togo from bad to worse. We already had to introduce a second migration protocol when adding the QEMU driver, since the one from Xen was insufficiently flexible to cope with passing the data the QEMU driver required.

It turns out that this protocol still has some flaws that we need to address. The current sequence is

 * Src: DumpXML
     - Generate XML to pass to dst

 * Dst: Prepare
     - Get ready to accept incoming VM
     - Generate optional cookie to pass to src

 * Src: Perform
     - Start migration and wait for send completion
     - Kill off VM if successful, resume if failed

 * Dst: Finish
     - Wait for recv completion and check status
     - Kill off VM if unsuccessful

The problems with this are:

 - Since the first step is a generic 'DumpXML' call, we can't add in other migration specific data. eg, we can't include any VM lease data from lock manager plugins

 - Since the first step is a generic 'DumpXML' call, we can't emit any 'migration begin' event on the source, or have any hook that runs right at the start of the process

 - Since there is no final step on the source, if the Finish method fails to receive all migration data & has to kill the VM, then there's no way to resume the original VM on the source

This patch attempts to introduce a version 3 that uses the improved 5 step sequence

 * Src: Begin
     - Generate XML to pass to dst
     - Generate optional cookie to pass to dst

 * Dst: Prepare
     - Get ready to accept incoming VM
     - Generate optional cookie to pass to src

 * Src: Perform
     - Start migration and wait for send completion
     - Generate optional cookie to pass to dst

 * Dst: Finish
     - Wait for recv completion and check status
     - Kill off VM if failed, resume if success
     - Generate optional cookie to pass to src

 * Src: Confirm
     - Kill off VM if success, resume if failed

The API is designed to allow both input and output cookies in all methods where applicable. This lets us pass around arbitrary extra driver specific data between src & dst during migration. Combined with the extra 'Begin' method this lets us pass lease information from source to dst at the start of migration.

Moving the killing of the source VM out of Perform and into Confirm means we can now recover if the dst host can't successfully Finish receiving migration data.
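To make the cookie plumbing above concrete, here is a heavily condensed sketch of what virDomainMigrateVersion3() below does for managed (non-p2p) migration. It is illustrative only: 'src'/'dst' stand for the source connection (domain->conn) and the destination connection (dconn), the sketchMigrateV3/nextCookie names are invented for the example, and all error handling, cleanup, locking and debug output are omitted.

/* Hand the cookie produced by one step to the next step as its input */
static void
nextCookie(char **cookiein, int *cookieinlen,
           char **cookieout, int *cookieoutlen)
{
    VIR_FREE(*cookiein);
    *cookiein = *cookieout;
    *cookieinlen = *cookieoutlen;
    *cookieout = NULL;
    *cookieoutlen = 0;
}

static virDomainPtr
sketchMigrateV3(virDomainPtr domain, virConnectPtr dconn,
                unsigned long flags, const char *dname,
                const char *uri, unsigned long bandwidth)
{
    virConnectPtr src = domain->conn, dst = dconn;
    char *cookiein = NULL, *cookieout = NULL;
    char *dom_xml, *uri_out = NULL;
    int cookieinlen = 0, cookieoutlen = 0;
    virDomainPtr ddomain = NULL;
    int ret, cancelled;

    /* Src: Begin - XML plus the first optional cookie for the dst */
    dom_xml = src->driver->domainMigrateBegin3
        (domain, &cookieout, &cookieoutlen, flags, dname, bandwidth);

    /* Dst: Prepare */
    nextCookie(&cookiein, &cookieinlen, &cookieout, &cookieoutlen);
    dst->driver->domainMigratePrepare3
        (dst, cookiein, cookieinlen, &cookieout, &cookieoutlen,
         uri, &uri_out, flags, dname, bandwidth, dom_xml);
    if (uri_out)
        uri = uri_out;      /* the destination may pick its own URI */

    /* Src: Perform - the source VM is left paused, not killed */
    nextCookie(&cookiein, &cookieinlen, &cookieout, &cookieoutlen);
    ret = src->driver->domainMigratePerform3
        (domain, cookiein, cookieinlen, &cookieout, &cookieoutlen,
         uri, flags, dname, bandwidth);
    cancelled = ret < 0;    /* Perform failed: tell Finish to kill the dst VM */

    /* Dst: Finish - reports whether the VM is really running on the dst */
    nextCookie(&cookiein, &cookieinlen, &cookieout, &cookieoutlen);
    ret = dst->driver->domainMigrateFinish3
        (dst, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
         uri, flags, cancelled, &ddomain);
    cancelled = (ret == 0 && ddomain == NULL);  /* dst failed: resume the src */

    /* Src: Confirm - kill the source VM on success, resume it on failure */
    nextCookie(&cookiein, &cookieinlen, &cookieout, &cookieoutlen);
    src->driver->domainMigrateConfirm3
        (domain, cookiein, cookieinlen, flags, cancelled);

    return ddomain;
}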
--- src/driver.h | 77 ++++++- src/esx/esx_driver.c | 6 + src/libvirt.c | 578 ++++++++++++++++++++++++++++++++++++++++++-- src/libvirt_internal.h | 66 +++++ src/libvirt_private.syms | 6 + src/libxl/libxl_driver.c | 6 + src/lxc/lxc_driver.c | 6 + src/openvz/openvz_driver.c | 6 + src/phyp/phyp_driver.c | 6 + src/qemu/qemu_driver.c | 6 + src/remote/remote_driver.c | 6 + src/test/test_driver.c | 6 + src/uml/uml_driver.c | 6 + src/vbox/vbox_tmpl.c | 6 + src/vmware/vmware_driver.c | 6 + src/xen/xen_driver.c | 6 + src/xenapi/xenapi_driver.c | 6 + 17 files changed, 785 insertions(+), 20 deletions(-) diff --git a/src/driver.h b/src/driver.h index 5cd0cea..ca28a9a 100644 --- a/src/driver.h +++ b/src/driver.h @@ -403,7 +403,7 @@ typedef int typedef int (*virDrvDomainMigratePrepareTunnel) - (virConnectPtr conn, + (virConnectPtr dconn, virStreamPtr st, unsigned long flags, const char *dname, @@ -518,6 +518,75 @@ typedef int typedef int (*virDrvDomainInjectNMI)(virDomainPtr dom, unsigned int flags); +typedef char * + (*virDrvDomainMigrateBegin3) + (virDomainPtr domain, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource); + +typedef int + (*virDrvDomainMigratePrepare3) + (virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri_in, + char **uri_out, + unsigned long flags, + const char *dname, + unsigned long resource, + const char *dom_xml); + +typedef int + (*virDrvDomainMigratePrepareTunnel3) + (virConnectPtr dconn, + virStreamPtr st, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource, + const char *dom_xml); + + +typedef int + (*virDrvDomainMigratePerform3) + (virDomainPtr dom, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource); + +typedef int + (*virDrvDomainMigrateFinish3) + (virConnectPtr dconn, + const char *dname, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + int cancelled, + virDomainPtr *newdom); + +typedef int + (*virDrvDomainMigrateConfirm3) + (virDomainPtr domain, + const char *cookiein, + int cookieinlen, + unsigned long flags, + int cancelled); /** * _virDriver: @@ -643,6 +712,12 @@ struct _virDriver { virDrvQemuDomainMonitorCommand qemuDomainMonitorCommand; virDrvDomainOpenConsole domainOpenConsole; virDrvDomainInjectNMI domainInjectNMI; + virDrvDomainMigrateBegin3 domainMigrateBegin3; + virDrvDomainMigratePrepare3 domainMigratePrepare3; + virDrvDomainMigratePrepareTunnel3 domainMigratePrepareTunnel3; + virDrvDomainMigratePerform3 domainMigratePerform3; + virDrvDomainMigrateFinish3 domainMigrateFinish3; + virDrvDomainMigrateConfirm3 domainMigrateConfirm3; }; typedef int diff --git a/src/esx/esx_driver.c b/src/esx/esx_driver.c index 5490ea7..25e0472 100644 --- a/src/esx/esx_driver.c +++ b/src/esx/esx_driver.c @@ -4700,6 +4700,12 @@ static virDriver esxDriver = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; diff --git a/src/libvirt.c b/src/libvirt.c index 09ab01c..c11ca12 100644 --- a/src/libvirt.c +++ 
b/src/libvirt.c @@ -3331,6 +3331,22 @@ error: } +/* + * Sequence v1: + * + * Dst: Prepare + * - Get ready to accept incoming VM + * - Generate optional cookie to pass to src + * + * Src: Perform + * - Start migration and wait for send completion + * - Kill off VM if successful, resume if failed + * + * Dst: Finish + * - Wait for recv completion and check status + * - Kill off VM if unsuccessful + * + */ static virDomainPtr virDomainMigrateVersion1 (virDomainPtr domain, virConnectPtr dconn, @@ -3399,6 +3415,25 @@ virDomainMigrateVersion1 (virDomainPtr domain, return ddomain; } +/* + * Sequence v2: + * + * Src: DumpXML + * - Generate XML to pass to dst + * + * Dst: Prepare + * - Get ready to accept incoming VM + * - Generate optional cookie to pass to src + * + * Src: Perform + * - Start migration and wait for send completion + * - Kill off VM if successful, resume if failed + * + * Dst: Finish + * - Wait for recv completion and check status + * - Kill off VM if unsuccessful + * + */ static virDomainPtr virDomainMigrateVersion2 (virDomainPtr domain, virConnectPtr dconn, @@ -3447,6 +3482,7 @@ virDomainMigrateVersion2 (virDomainPtr domain, flags |= VIR_MIGRATE_PAUSED; } + VIR_DEBUG("Prepare2 %p", dconn); ret = dconn->driver->domainMigratePrepare2 (dconn, &cookie, &cookielen, uri, &uri_out, flags, dname, bandwidth, dom_xml); @@ -3466,6 +3502,7 @@ virDomainMigrateVersion2 (virDomainPtr domain, /* Perform the migration. The driver isn't supposed to return * until the migration is complete. */ + VIR_DEBUG("Perform %p", domain->conn); ret = domain->conn->driver->domainMigratePerform (domain, cookie, cookielen, uri, flags, dname, bandwidth); @@ -3478,6 +3515,7 @@ virDomainMigrateVersion2 (virDomainPtr domain, * so it can do any cleanup if the migration failed. */ dname = dname ? 
dname : domain->name; + VIR_DEBUG("Finish2 %p ret=%d", dconn, ret); ddomain = dconn->driver->domainMigrateFinish2 (dconn, dname, cookie, cookielen, uri, flags, ret); @@ -3492,13 +3530,181 @@ virDomainMigrateVersion2 (virDomainPtr domain, } +/* + * Sequence v3: + * + * Src: Begin + * - Generate XML to pass to dst + * - Generate optional cookie to pass to dst + * + * Dst: Prepare + * - Get ready to accept incoming VM + * - Generate optional cookie to pass to src + * + * Src: Perform + * - Start migration and wait for send completion + * - Generate optional cookie to pass to dst + * + * Dst: Finish + * - Wait for recv completion and check status + * - Kill off VM if failed, resume if success + * - Generate optional cookie to pass to src + * + * Src: Confirm + * - Kill off VM if success, resume if failed + * + */ +static virDomainPtr +virDomainMigrateVersion3(virDomainPtr domain, + virConnectPtr dconn, + unsigned long flags, + const char *dname, + const char *uri, + unsigned long bandwidth) +{ + virDomainPtr ddomain = NULL; + char *uri_out = NULL; + char *cookiein = NULL; + char *cookieout = NULL; + char *dom_xml = NULL; + int cookieinlen = 0; + int cookieoutlen = 0; + int ret; + virDomainInfo info; + virErrorPtr orig_err = NULL; + int cancelled; + + if (!domain->conn->driver->domainMigrateBegin3 || + !domain->conn->driver->domainMigratePerform3 || + !domain->conn->driver->domainMigrateConfirm3 || + !dconn->driver->domainMigratePrepare3 || + !dconn->driver->domainMigrateFinish3) { + virLibConnError(VIR_ERR_INTERNAL_ERROR, __FUNCTION__); + virDispatchError(domain->conn); + return NULL; + } + + VIR_DEBUG("Begin3 %p", domain->conn); + dom_xml = domain->conn->driver->domainMigrateBegin3 + (domain, &cookieout, &cookieoutlen, flags, dname, + bandwidth); + if (!dom_xml) + goto done; + + ret = virDomainGetInfo (domain, &info); + if (ret == 0 && info.state == VIR_DOMAIN_PAUSED) { + flags |= VIR_MIGRATE_PAUSED; + } + + VIR_DEBUG("Prepare3 %p", dconn); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + ret = dconn->driver->domainMigratePrepare3 + (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen, + uri, &uri_out, flags, dname, bandwidth, dom_xml); + VIR_FREE (dom_xml); + if (ret == -1) + goto done; + + if (uri == NULL && uri_out == NULL) { + virLibConnError(VIR_ERR_INTERNAL_ERROR, + _("domainMigratePrepare3 did not set uri")); + virDispatchError(domain->conn); + goto done; + } + if (uri_out) + uri = uri_out; /* Did domainMigratePrepare3 change URI? */ + + /* Perform the migration. The driver isn't supposed to return + * until the migration is complete. The src VM should remain + * running, but in paused state until the destination can + * confirm migration completion. + */ + VIR_DEBUG("Perform3 %p uri=%s", domain->conn, uri); + VIR_FREE(cookiein); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + ret = domain->conn->driver->domainMigratePerform3 + (domain, cookiein, cookieinlen, &cookieout, &cookieoutlen, + uri, flags, dname, bandwidth); + + /* Perform failed. Make sure Finish doesn't overwrite the error */ + if (ret < 0) + orig_err = virSaveLastError(); + + /* If Perform returns < 0, then we need to cancel the VM + * startup on the destination + */ + cancelled = ret < 0 ? 1 : 0; + + /* + * The status code from the source is passed to the destination. + * The dest can cleanup if the source indicated it failed to + * send all migration data. 
Returns NULL for ddomain if + * the dest was unable to complete migration. + */ + VIR_DEBUG("Finish3 %p ret=%d", dconn, ret); + VIR_FREE(cookiein); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + dname = dname ? dname : domain->name; + ret = dconn->driver->domainMigrateFinish3 + (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen, + uri, flags, cancelled, &ddomain); + + /* If ret is 0 then 'ddomain' indicates whether the VM is + * running on the dest. If not running, we can restart + * the source. If ret is -1, we can't be sure what happened + * to the VM on the dest, thus the only safe option is to + * kill the VM on the source, even though that may leave + * no VM at all on either host. + */ + cancelled = ret == 0 && ddomain == NULL ? 1 : 0; + + /* + * If cancelled, then src VM will be restarted, else + * it will be killed + */ + VIR_DEBUG("Confirm3 %p ret=%d domain=%p", domain->conn, ret, domain); + VIR_FREE(cookiein); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + ret = domain->conn->driver->domainMigrateConfirm3 + (domain, cookiein, cookieinlen, + flags, cancelled); + /* If Confirm3 returns -1, there's nothing more we can + * do, but fortunately worst case is that there is a + * domain left in 'paused' state on source. + */ + + done: + if (orig_err) { + virSetError(orig_err); + virFreeError(orig_err); + } + VIR_FREE(uri_out); + VIR_FREE(cookiein); + VIR_FREE(cookieout); + return ddomain; +} + + /* - * This is sort of a migration v3 + * In normal migration, the libvirt client co-ordinates communcation + * between the 2 libvirtd instances on source & dest hosts. * - * In this version, the client does not talk to the destination - * libvirtd. The source libvirtd will still try to talk to the - * destination libvirtd though, and will do the prepare/perform/finish - * steps. + * In this peer-2-peer migration alternative, the libvirt client + * only talks to the source libvirtd instance. The source libvirtd + * then opens its own connection to the destination and co-ordinates + * migration itself. */ static int virDomainMigratePeer2Peer (virDomainPtr domain, @@ -3544,14 +3750,15 @@ virDomainMigratePeer2Peer (virDomainPtr domain, /* - * This is a variation on v1 & 2 migration + * In normal migration, the libvirt client co-ordinates communcation + * between the 2 libvirtd instances on source & dest hosts. * - * This is for hypervisors which can directly handshake - * without any libvirtd involvement on destination either - * from client, or source libvirt. + * Some hypervisors support an alternative, direct migration where + * there is no requirement for a libvirtd instance on the dest host. + * In this case * - * eg, XenD can talk direct to XenD, so libvirtd on dest - * does not need to be involved at all, or even running + * eg, XenD can talk direct to XenD, so libvirtd on dest does not + * need to be involved at all, or even running */ static int virDomainMigrateDirect (virDomainPtr domain, @@ -3691,6 +3898,7 @@ virDomainMigrate (virDomainPtr domain, return NULL; } + VIR_DEBUG0("Using peer2peer migration"); if (virDomainMigratePeer2Peer(domain, flags, dname, uri ? uri : dstURI, bandwidth) < 0) { VIR_FREE(dstURI); goto error; @@ -3712,16 +3920,24 @@ virDomainMigrate (virDomainPtr domain, /* Check that migration is supported by both drivers. 
*/ if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, - VIR_DRV_FEATURE_MIGRATION_V1) && + VIR_DRV_FEATURE_MIGRATION_V3) && VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, - VIR_DRV_FEATURE_MIGRATION_V1)) - ddomain = virDomainMigrateVersion1(domain, dconn, flags, dname, uri, bandwidth); - else if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, - VIR_DRV_FEATURE_MIGRATION_V2) && - VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, - VIR_DRV_FEATURE_MIGRATION_V2)) + VIR_DRV_FEATURE_MIGRATION_V3)) { + VIR_DEBUG0("Using migration protocol 3"); + ddomain = virDomainMigrateVersion3(domain, dconn, flags, dname, uri, bandwidth); + } else if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, + VIR_DRV_FEATURE_MIGRATION_V2) && + VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, + VIR_DRV_FEATURE_MIGRATION_V2)) { + VIR_DEBUG0("Using migration protocol 2"); ddomain = virDomainMigrateVersion2(domain, dconn, flags, dname, uri, bandwidth); - else { + } else if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, + VIR_DRV_FEATURE_MIGRATION_V1) && + VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, + VIR_DRV_FEATURE_MIGRATION_V1)) { + VIR_DEBUG0("Using migration protocol 1"); + ddomain = virDomainMigrateVersion1(domain, dconn, flags, dname, uri, bandwidth); + } else { /* This driver does not support any migration method */ virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); goto error; @@ -4150,6 +4366,330 @@ error: return -1; } +/* + * Not for public use. This function is part of the internal + * implementation of migration in the remote case. + */ +char * +virDomainMigrateBegin3(virDomainPtr domain, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long bandwidth) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain, "cookieout=%p, cookieoutlen=%p, " + "flags=%lu, dname=%s, bandwidth=%lu", + cookieout, cookieoutlen, flags, + NULLSTR(dname), bandwidth); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN (domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return NULL; + } + conn = domain->conn; + + if (domain->conn->flags & VIR_CONNECT_RO) { + virLibDomainError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + if (conn->driver->domainMigrateBegin3) { + char *xml; + xml = conn->driver->domainMigrateBegin3(domain, + cookieout, cookieoutlen, + flags, dname, bandwidth); + VIR_DEBUG("xml %s", NULLSTR(xml)); + if (!xml) + goto error; + return xml; + } + + virLibDomainError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return NULL; +} + + +/* + * Not for public use. This function is part of the internal + * implementation of migration in the remote case. 
+ */ +int +virDomainMigratePrepare3(virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri_in, + char **uri_out, + unsigned long flags, + const char *dname, + unsigned long bandwidth, + const char *dom_xml) +{ + VIR_DEBUG("dconn=%p, cookiein=%p, cookieinlen=%d, cookieout=%p, cookieoutlen=%p," + "uri_in=%s, uri_out=%p, flags=%lu, dname=%s, bandwidth=%lu, dom_xml=%s", + dconn, cookiein, cookieinlen, cookieout, cookieoutlen, uri_in, uri_out, + flags, NULLSTR(dname), bandwidth, dom_xml); + + virResetLastError(); + + if (!VIR_IS_CONNECT (dconn)) { + virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + if (dconn->flags & VIR_CONNECT_RO) { + virLibConnError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + if (dconn->driver->domainMigratePrepare3) { + int ret; + ret = dconn->driver->domainMigratePrepare3(dconn, + cookiein, cookieinlen, + cookieout, cookieoutlen, + uri_in, uri_out, + flags, dname, bandwidth, + dom_xml); + if (ret < 0) + goto error; + return ret; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(dconn); + return -1; +} + +/* + * Not for public use. This function is part of the internal + * implementation of migration in the remote case. + */ +int +virDomainMigratePrepareTunnel3(virConnectPtr conn, + virStreamPtr st, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long bandwidth, + const char *dom_xml) + +{ + VIR_DEBUG("conn=%p, stream=%p, cookiein=%p, cookieinlen=%d, cookieout=%p," + " cookieoutlen=%p, flags=%lu, dname=%s, bandwidth=%lu, dom_xml=%s", + conn, st, cookiein, cookieinlen, cookieout, cookieoutlen, flags, + NULLSTR(dname), bandwidth, dom_xml); + + virResetLastError(); + + if (!VIR_IS_CONNECT(conn)) { + virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + if (conn->flags & VIR_CONNECT_RO) { + virLibConnError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + if (conn != st->conn) { + virLibConnError(VIR_ERR_INVALID_ARG, __FUNCTION__); + goto error; + } + + if (conn->driver->domainMigratePrepareTunnel3) { + int rv = conn->driver->domainMigratePrepareTunnel3(conn, st, + cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, dname, + bandwidth, dom_xml); + if (rv < 0) + goto error; + return rv; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(conn); + return -1; +} + + +/* + * Not for public use. This function is part of the internal + * implementation of migration in the remote case. 
+ */ +int +virDomainMigratePerform3(virDomainPtr domain, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long bandwidth) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain, "cookiein=%p, cookieinlen=%d, cookieout=%p, cookieoutlen=%p," + "uri=%s, flags=%lu, dname=%s, bandwidth=%lu", + cookiein, cookieinlen, cookieout, cookieoutlen, + uri, flags, NULLSTR(dname), bandwidth); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN (domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + conn = domain->conn; + + if (domain->conn->flags & VIR_CONNECT_RO) { + virLibDomainError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + if (conn->driver->domainMigratePerform3) { + int ret; + ret = conn->driver->domainMigratePerform3(domain, + cookiein, cookieinlen, + cookieout, cookieoutlen, + uri, + flags, dname, bandwidth); + if (ret < 0) + goto error; + return ret; + } + + virLibDomainError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return -1; +} + + +/* + * Not for public use. This function is part of the internal + * implementation of migration in the remote case. + */ +int +virDomainMigrateFinish3(virConnectPtr dconn, + const char *dname, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + int cancelled, + virDomainPtr *newdom) +{ + VIR_DEBUG("dconn=%p, dname=%s, cookiein=%p, cookieinlen=%d, cookieout=%p," + "cookieoutlen=%p, uri=%s, flags=%lu, retcode=%d newdom=%p", + dconn, NULLSTR(dname), cookiein, cookieinlen, cookieout, + cookieoutlen, uri, flags, cancelled, newdom); + + virResetLastError(); + + if (!VIR_IS_CONNECT (dconn)) { + virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + if (dconn->flags & VIR_CONNECT_RO) { + virLibConnError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + if (dconn->driver->domainMigrateFinish3) { + int ret; + ret = dconn->driver->domainMigrateFinish3(dconn, dname, + cookiein, cookieinlen, + cookieout, cookieoutlen, + uri, flags, + cancelled, + newdom); + if (ret < 0) + goto error; + return ret; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(dconn); + return -1; +} + + +/* + * Not for public use. This function is part of the internal + * implementation of migration in the remote case. 
+ */ +int +virDomainMigrateConfirm3(virDomainPtr domain, + const char *cookiein, + int cookieinlen, + unsigned long flags, + int cancelled) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain, "cookiein=%p, cookieinlen=%d, flags=%lu, cancelled=%d", + cookiein, cookieinlen, flags, cancelled); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN (domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + conn = domain->conn; + + if (domain->conn->flags & VIR_CONNECT_RO) { + virLibDomainError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + if (conn->driver->domainMigrateConfirm3) { + int ret; + ret = conn->driver->domainMigrateConfirm3(domain, + cookiein, cookieinlen, + flags, cancelled); + if (ret < 0) + goto error; + return ret; + } + + virLibDomainError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return -1; +} + /** * virNodeGetInfo: diff --git a/src/libvirt_internal.h b/src/libvirt_internal.h index 1c4fa4f..81d0c56 100644 --- a/src/libvirt_internal.h +++ b/src/libvirt_internal.h @@ -66,6 +66,13 @@ enum { * perform step is used. */ VIR_DRV_FEATURE_MIGRATION_DIRECT = 5, + + /* + * Driver supports V3-style virDomainMigrate, ie domainMigrateBegin3/ + * domainMigratePrepare3/domainMigratePerform3/domainMigrateFinish3/ + * domainMigrateConfirm3. + */ + VIR_DRV_FEATURE_MIGRATION_V3 = 6, }; @@ -115,4 +122,63 @@ int virDomainMigratePrepareTunnel(virConnectPtr dconn, unsigned long resource, const char *dom_xml); + +char *virDomainMigrateBegin3(virDomainPtr domain, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource); + +int virDomainMigratePrepare3(virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri_in, + char **uri_out, + unsigned long flags, + const char *dname, + unsigned long resource, + const char *dom_xml); + +int virDomainMigratePrepareTunnel3(virConnectPtr dconn, + virStreamPtr st, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource, + const char *dom_xml); + + +int virDomainMigratePerform3(virDomainPtr dom, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource); + +int virDomainMigrateFinish3(virConnectPtr dconn, + const char *dname, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + int cancelled, /* Kill the dst VM */ + virDomainPtr *newdom); + +int virDomainMigrateConfirm3(virDomainPtr domain, + const char *cookiein, + int cookieinlen, + unsigned long flags, + int restart); /* Restart the src VM */ + #endif diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index 7e5b1d7..81def5c 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -558,6 +558,12 @@ virDomainMigratePerform; virDomainMigratePrepare2; virDomainMigratePrepare; virDomainMigratePrepareTunnel; +virDomainMigrateBegin3; +virDomainMigratePrepare3; +virDomainMigratePrepareTunnel3; +virDomainMigratePerform3; +virDomainMigrateFinish3; +virDomainMigrateConfirm3; virDrvSupportsFeature; virRegisterDeviceMonitor; virRegisterDriver; diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c index 5a5de4f..9044385 100644 --- a/src/libxl/libxl_driver.c +++ 
b/src/libxl/libxl_driver.c @@ -2791,6 +2791,12 @@ static virDriver libxlDriver = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; static virStateDriver libxlStateDriver = { diff --git a/src/lxc/lxc_driver.c b/src/lxc/lxc_driver.c index c91fdf4..bd628b8 100644 --- a/src/lxc/lxc_driver.c +++ b/src/lxc/lxc_driver.c @@ -2816,6 +2816,12 @@ static virDriver lxcDriver = { NULL, /* qemuDomainMonitorCommand */ lxcDomainOpenConsole, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; static virStateDriver lxcStateDriver = { diff --git a/src/openvz/openvz_driver.c b/src/openvz/openvz_driver.c index f5fae2d..f233fad 100644 --- a/src/openvz/openvz_driver.c +++ b/src/openvz/openvz_driver.c @@ -1668,6 +1668,12 @@ static virDriver openvzDriver = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; int openvzRegister(void) { diff --git a/src/phyp/phyp_driver.c b/src/phyp/phyp_driver.c index fd3b6d4..6a1a156 100644 --- a/src/phyp/phyp_driver.c +++ b/src/phyp/phyp_driver.c @@ -3829,6 +3829,12 @@ static virDriver phypDriver = { NULL, /* qemuMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; static virStorageDriver phypStorageDriver = { diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 4f288d3..5a35f9f 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -7244,6 +7244,12 @@ static virDriver qemuDriver = { qemuDomainMonitorCommand, /* qemuDomainMonitorCommand */ qemuDomainOpenConsole, /* domainOpenConsole */ qemuDomainInjectNMI, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index 37940f3..694a7b2 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -6499,6 +6499,12 @@ static virDriver remote_driver = { remoteQemuDomainMonitorCommand, /* qemuDomainMonitorCommand */ remoteDomainOpenConsole, /* domainOpenConsole */ remoteDomainInjectNMI, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; static virNetworkDriver network_driver = { diff --git a/src/test/test_driver.c b/src/test/test_driver.c index b6883f3..e8a490a 100644 --- a/src/test/test_driver.c +++ b/src/test/test_driver.c @@ -5448,6 +5448,12 @@ static virDriver 
testDriver = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; static virNetworkDriver testNetworkDriver = { diff --git a/src/uml/uml_driver.c b/src/uml/uml_driver.c index 3c0a950..28e0351 100644 --- a/src/uml/uml_driver.c +++ b/src/uml/uml_driver.c @@ -2254,6 +2254,12 @@ static virDriver umlDriver = { NULL, /* qemuDomainMonitorCommand */ umlDomainOpenConsole, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; static int diff --git a/src/vbox/vbox_tmpl.c b/src/vbox/vbox_tmpl.c index e52145e..d7ef1ae 100644 --- a/src/vbox/vbox_tmpl.c +++ b/src/vbox/vbox_tmpl.c @@ -8653,6 +8653,12 @@ virDriver NAME(Driver) = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; virNetworkDriver NAME(NetworkDriver) = { diff --git a/src/vmware/vmware_driver.c b/src/vmware/vmware_driver.c index a3a13c1..7182e9a 100644 --- a/src/vmware/vmware_driver.c +++ b/src/vmware/vmware_driver.c @@ -1008,6 +1008,12 @@ static virDriver vmwareDriver = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; int diff --git a/src/xen/xen_driver.c b/src/xen/xen_driver.c index cd65625..6b608b7 100644 --- a/src/xen/xen_driver.c +++ b/src/xen/xen_driver.c @@ -2209,6 +2209,12 @@ static virDriver xenUnifiedDriver = { NULL, /* qemuDomainMonitorCommand */ xenUnifiedDomainOpenConsole, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; /** diff --git a/src/xenapi/xenapi_driver.c b/src/xenapi/xenapi_driver.c index 7e863ac..f99ebaf 100644 --- a/src/xenapi/xenapi_driver.c +++ b/src/xenapi/xenapi_driver.c @@ -1890,6 +1890,12 @@ static virDriver xenapiDriver = { NULL, /* qemuDomainMonitorCommand */ NULL, /* domainOpenConsole */ NULL, /* domainInjectNMI */ + NULL, /* domainMigrateBegin3 */ + NULL, /* domainMigratePrepare3 */ + NULL, /* domainMigratePrepareTunnel3 */ + NULL, /* domainMigratePerform3 */ + NULL, /* domainMigrateFinish3 */ + NULL, /* domainMigrateConfirm3 */ }; /** -- 1.7.4.4
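Every driver table in this patch only gains NULL placeholders for the six new entry points; presumably later patches in the series provide real implementations for QEMU. As a purely hypothetical illustration of what that wiring looks like (the 'foo' names are invented for this example and are not part of the series), a driver opts in by advertising the new feature bit from its virDrvSupportsFeature callback and by filling in the new slots at the end of its virDriver table:

/* Hypothetical 'foo' driver: report the new capability so that
 * virDomainMigrate() selects virDomainMigrateVersion3() during
 * feature negotiation. */
static int
fooSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
{
    switch (feature) {
    case VIR_DRV_FEATURE_MIGRATION_V3:
        return 1;   /* the v3 Begin/Prepare/Perform/Finish/Confirm
                       entry points below are all implemented */
    default:
        return 0;
    }
}

/* ...and replace the NULL placeholders at the end of the driver table
 * with real implementations (earlier fields elided for brevity): */
static virDriver fooDriver = {
    /* ... */
    fooDomainMigrateBegin3,          /* domainMigrateBegin3 */
    fooDomainMigratePrepare3,        /* domainMigratePrepare3 */
    fooDomainMigratePrepareTunnel3,  /* domainMigratePrepareTunnel3 */
    fooDomainMigratePerform3,        /* domainMigratePerform3 */
    fooDomainMigrateFinish3,         /* domainMigrateFinish3 */
    fooDomainMigrateConfirm3,        /* domainMigrateConfirm3 */
};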

On Wed, May 11, 2011 at 10:09:47 +0100, Daniel P. Berrange wrote:
> The problems with this [migration v2] are:
> - Since the first step is a generic 'DumpXML' call, we can't add in other migration specific data. eg, we can't include any VM lease data from lock manager plugins
>
> - Since the first step is a generic 'DumpXML' call, we can't emit any 'migration begin' event on the source, or have any hook that runs right at the start of the process
>
> - Since there is no final step on the source, if the Finish method fails to receive all migration data & has to kill the VM, then there's no way to resume the original VM on the source
Sorry for not noticing it earlier but I think we have another problem with our current migration schema (and this v3 as well). Domain XML may contain some data (such as /domain/devices/graphics/@listen) that an application may wish to change when migrating a domain. What would be the best way to implement the ability to modify domain XML that is passed to target libvirtd? A callback parameter for virDomainMigrate API or something else perhaps?

Jirka

On Wed, May 11, 2011 at 11:51:41AM +0200, Jiri Denemark wrote:
> On Wed, May 11, 2011 at 10:09:47 +0100, Daniel P. Berrange wrote:
> > The problems with this [migration v2] are:
> > - Since the first step is a generic 'DumpXML' call, we can't add in other migration specific data. eg, we can't include any VM lease data from lock manager plugins
> >
> > - Since the first step is a generic 'DumpXML' call, we can't emit any 'migration begin' event on the source, or have any hook that runs right at the start of the process
> >
> > - Since there is no final step on the source, if the Finish method fails to receive all migration data & has to kill the VM, then there's no way to resume the original VM on the source
> Sorry for not noticing it earlier but I think we have another problem with our current migration schema (and this v3 as well). Domain XML may contain some data (such as /domain/devices/graphics/@listen) that an application may wish to change when migrating a domain. What would be the best way to implement the ability to modify domain XML that is passed to target libvirtd? A callback parameter for virDomainMigrate API or something else perhaps?
Well there's several issues here. The immediate one is that the public migration API doesn't provide any way to expose this kind of capability to applications. The v3 migration code is providing a new internal migration protocol, for our existing public migration API, so existing migration usage will be unchanged from app dev POV.

I also don't much like the idea of allowing the application to make arbitrary config changes at migration time. In particular it will make it hard to provide any kind of meaningful fine grained access control over migration, and hard to guarantee that things like disk locking are consistent.

Our general goal has been that the XML should be designed such that it does not rely on host specific config tasks. cf the work being done for virtual network switches, so we can avoid directly referring to host interface names for NIC config.

Daniel

--
|: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org -o- http://virt-manager.org :|
|: http://autobuild.org -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
> Migration just seems togo from bad to worse. We already had to
s/togo/to go/
> introduce a second migration protocol when adding the QEMU driver, since the one from Xen was insufficiently flexible to cope with passing the data the QEMU driver required.
> @@ -643,6 +712,12 @@ struct _virDriver {
>      virDrvQemuDomainMonitorCommand qemuDomainMonitorCommand;
>      virDrvDomainOpenConsole domainOpenConsole;
>      virDrvDomainInjectNMI domainInjectNMI;
> +    virDrvDomainMigrateBegin3 domainMigrateBegin3;
> +    virDrvDomainMigratePrepare3 domainMigratePrepare3;
> +    virDrvDomainMigratePrepareTunnel3 domainMigratePrepareTunnel3;
> +    virDrvDomainMigratePerform3 domainMigratePerform3;
> +    virDrvDomainMigrateFinish3 domainMigrateFinish3;
> +    virDrvDomainMigrateConfirm3 domainMigrateConfirm3;
Should we group these callbacks next door to their version2 counterparts to make it easier to find all the migration callbacks next to one another? Up to you; I didn't do it below...
> @@ -3691,6 +3898,7 @@ virDomainMigrate (virDomainPtr domain,
>              return NULL;
>          }
> +        VIR_DEBUG0("Using peer2peer migration");
Recent patch flurry means this won't compile. Isn't rebasing fun? :)
> +int
> +virDomainMigratePerform3(virDomainPtr domain,
> +                         const char *cookiein,
> +                         int cookieinlen,
> +                         char **cookieout,
> +                         int *cookieoutlen,
> +                         const char *uri,
> +                         unsigned long flags,
> +                         const char *dname,
> +                         unsigned long bandwidth)
> +{
> +    virConnectPtr conn;
> +
> +    VIR_DOMAIN_DEBUG(domain, "cookiein=%p, cookieinlen=%d, cookieout=%p, cookieoutlen=%p,"
> +                     "uri=%s, flags=%lu, dname=%s, bandwidth=%lu",
> +                     cookiein, cookieinlen, cookieout, cookieoutlen,
> +                     uri, flags, NULLSTR(dname), bandwidth);
Oh my; 10 arguments. You broke my < 10-argument assumption in one blow :)
> +
> +    virResetLastError();
> +
> +    if (!VIR_IS_CONNECT (dconn)) {
Still some spaces before ( if you want to clean that up. ACK with this squashed in: diff --git i/src/libvirt.c w/src/libvirt.c index ec888b2..aa72fb3 100644 --- i/src/libvirt.c +++ w/src/libvirt.c @@ -312,15 +312,17 @@ static struct gcry_thread_cbs virTLSThreadImpl = { }; /* Helper macros to implement VIR_DOMAIN_DEBUG using just C99. This - * assumes you pass fewer than 10 arguments to VIR_DOMAIN_DEBUG, but + * assumes you pass fewer than 15 arguments to VIR_DOMAIN_DEBUG, but * can easily be expanded if needed. * * Note that gcc provides extensions of "define a(b...) b" or * "define a(b,...) b,##__VA_ARGS__" as a means of eliding a comma * when no var-args are present, but we don't want to require gcc. */ -#define VIR_ARG10(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, ...) _10 -#define VIR_HAS_COMMA(...) VIR_ARG10(__VA_ARGS__, 1, 1, 1, 1, 1, 1, 1, 1, 0) +#define VIR_ARG15(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, \ + _11, _12, _13, _14, _15, ...) _15 +#define VIR_HAS_COMMA(...) \ + VIR_ARG15(__VA_ARGS__, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* Form the name VIR_DOMAIN_DEBUG_[01], then call that macro, * according to how many arguments are present. Two-phase due to @@ -3439,7 +3441,7 @@ virDomainMigrateVersion1 (virDomainPtr domain, ddomain = dconn->driver->domainMigrateFinish (dconn, dname, cookie, cookielen, uri, flags); else - ddomain = virDomainLookupByName (dconn, dname); + ddomain = virDomainLookupByName(dconn, dname); done: VIR_FREE (uri_out); @@ -3930,14 +3932,14 @@ virDomainMigrate (virDomainPtr domain, return NULL; } - VIR_DEBUG0("Using peer2peer migration"); + VIR_DEBUG("Using peer2peer migration"); if (virDomainMigratePeer2Peer(domain, flags, dname, uri ? uri : dstURI, bandwidth) < 0) { VIR_FREE(dstURI); goto error; } VIR_FREE(dstURI); - ddomain = virDomainLookupByName (dconn, dname ? dname : domain->name); + ddomain = virDomainLookupByName(dconn, dname ? 
dname : domain->name); } else { /* This driver does not support peer to peer migration */ virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); @@ -3955,19 +3957,19 @@ virDomainMigrate (virDomainPtr domain, VIR_DRV_FEATURE_MIGRATION_V3) && VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, VIR_DRV_FEATURE_MIGRATION_V3)) { - VIR_DEBUG0("Using migration protocol 3"); + VIR_DEBUG("Using migration protocol 3"); ddomain = virDomainMigrateVersion3(domain, dconn, flags, dname, uri, bandwidth); } else if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, VIR_DRV_FEATURE_MIGRATION_V2) && VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, VIR_DRV_FEATURE_MIGRATION_V2)) { - VIR_DEBUG0("Using migration protocol 2"); + VIR_DEBUG("Using migration protocol 2"); ddomain = virDomainMigrateVersion2(domain, dconn, flags, dname, uri, bandwidth); } else if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, VIR_DRV_FEATURE_MIGRATION_V1) && VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, VIR_DRV_FEATURE_MIGRATION_V1)) { - VIR_DEBUG0("Using migration protocol 1"); + VIR_DEBUG("Using migration protocol 1"); ddomain = virDomainMigrateVersion1(domain, dconn, flags, dname, uri, bandwidth); } else { /* This driver does not support any migration method */ @@ -4122,7 +4124,7 @@ virDomainMigratePrepare (virConnectPtr dconn, virResetLastError(); - if (!VIR_IS_CONNECT (dconn)) { + if (!VIR_IS_CONNECT(dconn)) { virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); virDispatchError(NULL); return -1; @@ -4135,9 +4137,9 @@ virDomainMigratePrepare (virConnectPtr dconn, if (dconn->driver->domainMigratePrepare) { int ret; - ret = dconn->driver->domainMigratePrepare (dconn, cookie, cookielen, - uri_in, uri_out, - flags, dname, bandwidth); + ret = dconn->driver->domainMigratePrepare(dconn, cookie, cookielen, + uri_in, uri_out, + flags, dname, bandwidth); if (ret < 0) goto error; return ret; @@ -4218,7 +4220,7 @@ virDomainMigrateFinish (virConnectPtr dconn, virResetLastError(); - if (!VIR_IS_CONNECT (dconn)) { + if (!VIR_IS_CONNECT(dconn)) { virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); virDispatchError(NULL); return NULL; @@ -4231,9 +4233,9 @@ virDomainMigrateFinish (virConnectPtr dconn, if (dconn->driver->domainMigrateFinish) { virDomainPtr ret; - ret = dconn->driver->domainMigrateFinish (dconn, dname, - cookie, cookielen, - uri, flags); + ret = dconn->driver->domainMigrateFinish(dconn, dname, + cookie, cookielen, + uri, flags); if (!ret) goto error; return ret; @@ -4269,7 +4271,7 @@ virDomainMigratePrepare2 (virConnectPtr dconn, virResetLastError(); - if (!VIR_IS_CONNECT (dconn)) { + if (!VIR_IS_CONNECT(dconn)) { virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); virDispatchError(NULL); return -1; @@ -4282,10 +4284,10 @@ virDomainMigratePrepare2 (virConnectPtr dconn, if (dconn->driver->domainMigratePrepare2) { int ret; - ret = dconn->driver->domainMigratePrepare2 (dconn, cookie, cookielen, - uri_in, uri_out, - flags, dname, bandwidth, - dom_xml); + ret = dconn->driver->domainMigratePrepare2(dconn, cookie, cookielen, + uri_in, uri_out, + flags, dname, bandwidth, + dom_xml); if (ret < 0) goto error; return ret; @@ -4317,7 +4319,7 @@ virDomainMigrateFinish2 (virConnectPtr dconn, virResetLastError(); - if (!VIR_IS_CONNECT (dconn)) { + if (!VIR_IS_CONNECT(dconn)) { virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); virDispatchError(NULL); return NULL; @@ -4330,10 +4332,10 @@ virDomainMigrateFinish2 (virConnectPtr dconn, if (dconn->driver->domainMigrateFinish2) { virDomainPtr ret; - ret = 
dconn->driver->domainMigrateFinish2 (dconn, dname, - cookie, cookielen, - uri, flags, - retcode); + ret = dconn->driver->domainMigrateFinish2(dconn, dname, + cookie, cookielen, + uri, flags, + retcode); if (!ret) goto error; return ret; @@ -4474,7 +4476,7 @@ virDomainMigratePrepare3(virConnectPtr dconn, virResetLastError(); - if (!VIR_IS_CONNECT (dconn)) { + if (!VIR_IS_CONNECT(dconn)) { virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); virDispatchError(NULL); return -1; @@ -4643,7 +4645,7 @@ virDomainMigrateFinish3(virConnectPtr dconn, virResetLastError(); - if (!VIR_IS_CONNECT (dconn)) { + if (!VIR_IS_CONNECT(dconn)) { virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); virDispatchError(NULL); return -1; -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org
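For anyone puzzled by the VIR_ARG15 hunk in that squash: it is the usual C99 argument-counting trick, just widened from 10 to 15 slots. A standalone illustration (not part of the patch) of how it behaves:

/* The literal 1s and the final 0 act as padding: however many real
 * arguments are passed, whatever lands in the 15th position is the
 * result.  With a single argument the 0 reaches slot 15; with 2-14
 * arguments one of the 1s does. */
#define VIR_ARG15(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, \
                  _11, _12, _13, _14, _15, ...) _15
#define VIR_HAS_COMMA(...) \
    VIR_ARG15(__VA_ARGS__, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)

/* VIR_HAS_COMMA(dom)            -> 0   (domain only, no format args)   */
/* VIR_HAS_COMMA(dom, fmt, a)    -> 1   (domain plus format arguments)  */
/* This holds for up to 14 arguments total; a 15th argument would land  */
/* in slot 15 itself, hence the "fewer than 15 arguments" comment in    */
/* the squash.                                                          */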

* src/remote/remote_protocol.x: Define wire protocol for migration protocol v3 * daemon/remote.c: Server side dispatch * src/remote/remote_driver.c: Client side serialization * src/remote/remote_protocol.c, src/remote/remote_protocol.h, daemon/remote_dispatch_args.h, daemon/remote_dispatch_prototypes.h, daemon/remote_dispatch_ret.h, daemon/remote_dispatch_table.h: Re-generate files * src/remote_protocol-structs: Declare new ABIs --- daemon/remote.c | 315 +++++++++++++++++++++++++++++++++++ daemon/remote_generator.pl | 12 ++ src/remote/remote_driver.c | 370 +++++++++++++++++++++++++++++++++++++++++- src/remote/remote_protocol.x | 79 +++++++++- src/remote_protocol-structs | 90 ++++++++++ 5 files changed, 859 insertions(+), 7 deletions(-) diff --git a/daemon/remote.c b/daemon/remote.c index 2220655..f89d612 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -76,6 +76,7 @@ static virStorageVolPtr get_nonnull_storage_vol(virConnectPtr conn, remote_nonnu static virSecretPtr get_nonnull_secret(virConnectPtr conn, remote_nonnull_secret secret); static virNWFilterPtr get_nonnull_nwfilter(virConnectPtr conn, remote_nonnull_nwfilter nwfilter); static virDomainSnapshotPtr get_nonnull_domain_snapshot(virDomainPtr dom, remote_nonnull_domain_snapshot snapshot); +static int make_domain(remote_domain *dom_dst, virDomainPtr dom_src); static void make_nonnull_domain(remote_nonnull_domain *dom_dst, virDomainPtr dom_src); static void make_nonnull_network(remote_nonnull_network *net_dst, virNetworkPtr net_src); static void make_nonnull_interface(remote_nonnull_interface *interface_dst, virInterfacePtr interface_src); @@ -2989,6 +2990,305 @@ cleanup: #include "qemu_dispatch_bodies.h" +static int +remoteDispatchDomainMigrateBegin3(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client ATTRIBUTE_UNUSED, + virConnectPtr conn, + remote_message_header *hdr ATTRIBUTE_UNUSED, + remote_error *rerr, + remote_domain_migrate_begin3_args *args, + remote_domain_migrate_begin3_ret *ret) +{ + char *xml = NULL; + virDomainPtr dom = NULL; + char *dname; + char *cookieout = NULL; + int cookieoutlen = 0; + int rv = -1; + + if (!conn) { + virNetError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + if (!(dom = get_nonnull_domain(conn, args->dom))) + goto cleanup; + + dname = args->dname == NULL ? NULL : *args->dname; + + if (!(xml = virDomainMigrateBegin3(dom, + &cookieout, &cookieoutlen, + args->flags, dname, args->resource))) + goto cleanup; + + /* remoteDispatchClientRequest will free cookie and + * the xml string if there is one. + */ + ret->cookie_out.cookie_out_len = cookieoutlen; + ret->cookie_out.cookie_out_val = cookieout; + ret->xml = xml; + + rv = 0; + +cleanup: + if (rv < 0) + remoteDispatchError(rerr); + if (dom) + virDomainFree(dom); + return rv; +} + + +static int +remoteDispatchDomainMigratePrepare3(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client ATTRIBUTE_UNUSED, + virConnectPtr conn, + remote_message_header *hdr ATTRIBUTE_UNUSED, + remote_error *rerr, + remote_domain_migrate_prepare3_args *args, + remote_domain_migrate_prepare3_ret *ret) +{ + char *cookieout = NULL; + int cookieoutlen = 0; + char *uri_in; + char **uri_out; + char *dname; + int rv = -1; + + if (!conn) { + virNetError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + uri_in = args->uri_in == NULL ? NULL : *args->uri_in; + dname = args->dname == NULL ? NULL : *args->dname; + + /* Wacky world of XDR ... 
*/ + if (VIR_ALLOC(uri_out) < 0) { + virReportOOMError(); + goto cleanup; + } + + if (virDomainMigratePrepare3(conn, + args->cookie_in.cookie_in_val, + args->cookie_in.cookie_in_len, + &cookieout, &cookieoutlen, + uri_in, uri_out, + args->flags, dname, args->resource, + args->dom_xml) < 0) + goto cleanup; + + /* remoteDispatchClientRequest will free cookie, uri_out and + * the string if there is one. + */ + ret->cookie_out.cookie_out_len = cookieoutlen; + ret->cookie_out.cookie_out_val = cookieout; + ret->uri_out = *uri_out == NULL ? NULL : uri_out; + + rv = 0; + +cleanup: + if (rv < 0) { + remoteDispatchError(rerr); + VIR_FREE(uri_out); + } + return rv; +} + +static int +remoteDispatchDomainMigratePrepareTunnel3(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client, + virConnectPtr conn, + remote_message_header *hdr, + remote_error *rerr, + remote_domain_migrate_prepare_tunnel3_args *args, + remote_domain_migrate_prepare_tunnel3_ret *ret) +{ + char *dname; + char *cookieout = NULL; + int cookieoutlen = 0; + struct qemud_client_stream *stream = NULL; + int rv = -1; + + if (!conn) { + virNetError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + dname = args->dname == NULL ? NULL : *args->dname; + + if (!(stream = remoteCreateClientStream(conn, hdr))) { + virReportOOMError(); + goto cleanup; + } + + if (virDomainMigratePrepareTunnel3(conn, stream->st, + args->cookie_in.cookie_in_val, + args->cookie_in.cookie_in_len, + &cookieout, &cookieoutlen, + args->flags, dname, args->resource, + args->dom_xml) < 0) + goto cleanup; + + if (remoteAddClientStream(client, stream, 0) < 0) + goto cleanup; + + /* remoteDispatchClientRequest will free cookie + */ + ret->cookie_out.cookie_out_len = cookieoutlen; + ret->cookie_out.cookie_out_val = cookieout; + + rv = 0; + +cleanup: + if (rv < 0) { + remoteDispatchError(rerr); + VIR_FREE(cookieout); + } + if (stream && rv < 0) { + virStreamAbort(stream->st); + remoteFreeClientStream(client, stream); + } + return rv; +} + +static int +remoteDispatchDomainMigratePerform3(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client ATTRIBUTE_UNUSED, + virConnectPtr conn, + remote_message_header *hdr ATTRIBUTE_UNUSED, + remote_error *rerr, + remote_domain_migrate_perform3_args *args, + remote_domain_migrate_perform3_ret *ret) +{ + virDomainPtr dom = NULL; + char *dname; + char *cookieout = NULL; + int cookieoutlen = 0; + int rv = -1; + + if (!conn) { + virNetError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + if (!(dom = get_nonnull_domain(conn, args->dom))) + goto cleanup; + + dname = args->dname == NULL ? 
NULL : *args->dname; + + if (virDomainMigratePerform3(dom, + args->cookie_in.cookie_in_val, + args->cookie_in.cookie_in_len, + &cookieout, &cookieoutlen, + args->uri, + args->flags, dname, args->resource) < 0) + goto cleanup; + + /* remoteDispatchClientRequest will free cookie + */ + ret->cookie_out.cookie_out_len = cookieoutlen; + ret->cookie_out.cookie_out_val = cookieout; + + rv = 0; + +cleanup: + if (rv < 0) + remoteDispatchError(rerr); + if (dom) + virDomainFree(dom); + return rv; +} + + +static int +remoteDispatchDomainMigrateFinish3(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client ATTRIBUTE_UNUSED, + virConnectPtr conn, + remote_message_header *hdr ATTRIBUTE_UNUSED, + remote_error *rerr, + remote_domain_migrate_finish3_args *args, + remote_domain_migrate_finish3_ret *ret) +{ + virDomainPtr dom = NULL; + char *cookieout = NULL; + int cookieoutlen = 0; + int rv = -1; + + if (!conn) { + virNetError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + if (virDomainMigrateFinish3(conn, args->dname, + args->cookie_in.cookie_in_val, + args->cookie_in.cookie_in_len, + &cookieout, &cookieoutlen, + args->uri, + args->flags, + args->cancelled, + &dom) < 0) + goto cleanup; + + if (dom && + make_domain(&ret->ddom, dom) < 0) + goto cleanup; + + /* remoteDispatchClientRequest will free cookie + */ + ret->cookie_out.cookie_out_len = cookieoutlen; + ret->cookie_out.cookie_out_val = cookieout; + + rv = 0; + +cleanup: + if (rv < 0) { + remoteDispatchError(rerr); + VIR_FREE(cookieout); + } + if (dom) + virDomainFree(dom); + return rv; +} + + +static int +remoteDispatchDomainMigrateConfirm3(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client ATTRIBUTE_UNUSED, + virConnectPtr conn, + remote_message_header *hdr ATTRIBUTE_UNUSED, + remote_error *rerr, + remote_domain_migrate_confirm3_args *args, + void *ret ATTRIBUTE_UNUSED) +{ + virDomainPtr dom = NULL; + int rv = -1; + + if (!conn) { + virNetError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + if (!(dom = get_nonnull_domain(conn, args->dom))) + goto cleanup; + + if (virDomainMigrateConfirm3(dom, + args->cookie_in.cookie_in_val, + args->cookie_in.cookie_in_len, + args->flags, args->cancelled) < 0) + goto cleanup; + + rv = 0; + +cleanup: + if (rv < 0) + remoteDispatchError(rerr); + if (dom) + virDomainFree(dom); + return rv; +} + + /*----- Helpers. -----*/ /* get_nonnull_domain and get_nonnull_network turn an on-wire @@ -3054,6 +3354,21 @@ get_nonnull_domain_snapshot(virDomainPtr dom, remote_nonnull_domain_snapshot sna } /* Make remote_nonnull_domain and remote_nonnull_network. 
*/ +static int +make_domain(remote_domain *dom_dst, virDomainPtr dom_src) +{ + remote_domain rdom; + if (VIR_ALLOC(rdom) < 0) + return -1; + + rdom->id = dom_src->id; + rdom->name = strdup(dom_src->name); + memcpy(rdom->uuid, dom_src->uuid, VIR_UUID_BUFLEN); + + *dom_dst = rdom; + return 0; +} + static void make_nonnull_domain(remote_nonnull_domain *dom_dst, virDomainPtr dom_src) { diff --git a/daemon/remote_generator.pl b/daemon/remote_generator.pl index 79d2021..d525983 100755 --- a/daemon/remote_generator.pl +++ b/daemon/remote_generator.pl @@ -273,8 +273,14 @@ elsif ($opt_b) { "DomainGetVcpus", "DomainMemoryPeek", "DomainMemoryStats", + "DomainMigrateBegin3", + "DomainMigrateConfirm3", + "DomainMigrateFinish3", + "DomainMigratePerform3", "DomainMigratePrepare", "DomainMigratePrepare2", + "DomainMigratePrepare3", + "DomainMigratePrepareTunnel3", "GetType", "NodeDeviceGetParent", "NodeGetSecurityModel", @@ -802,8 +808,14 @@ elsif ($opt_k) { "DomainGetVcpus", "DomainMemoryPeek", "DomainMemoryStats", + "DomainMigrateBegin3", + "DomainMigrateConfirm3", + "DomainMigrateFinish3", + "DomainMigratePerform3", "DomainMigratePrepare", "DomainMigratePrepare2", + "DomainMigratePrepare3", + "DomainMigratePrepareTunnel3", "GetType", "NodeDeviceGetParent", "NodeGetSecurityModel", diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index 694a7b2..e113e39 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -245,6 +245,7 @@ static int remoteAuthPolkit (virConnectPtr conn, struct private_data *priv, int virReportErrorHelper(VIR_FROM_REMOTE, code, __FILE__, \ __FUNCTION__, __LINE__, __VA_ARGS__) +static virDomainPtr get_domain (virConnectPtr conn, remote_domain domain); static virDomainPtr get_nonnull_domain (virConnectPtr conn, remote_nonnull_domain domain); static virNetworkPtr get_nonnull_network (virConnectPtr conn, remote_nonnull_network network); static virNWFilterPtr get_nonnull_nwfilter (virConnectPtr conn, remote_nonnull_nwfilter nwfilter); @@ -4967,9 +4968,350 @@ done: return rv; } + +static char * +remoteDomainMigrateBegin3(virDomainPtr domain, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + char *rv = NULL; + remote_domain_migrate_begin3_args args; + remote_domain_migrate_begin3_ret ret; + struct private_data *priv = domain->conn->privateData; + + remoteDriverLock(priv); + + memset(&args, 0, sizeof(args)); + memset(&ret, 0, sizeof(ret)); + + make_nonnull_domain (&args.dom, domain); + args.flags = flags; + args.dname = dname == NULL ? NULL : (char **) &dname; + args.resource = resource; + + if (call (domain->conn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_BEGIN3, + (xdrproc_t) xdr_remote_domain_migrate_begin3_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_begin3_ret, (char *) &ret) == -1) + goto done; + + if (ret.cookie_out.cookie_out_len > 0) { + if (!cookieout || !cookieoutlen) { + remoteError(VIR_ERR_INTERNAL_ERROR, "%s", + _("caller ignores cookieout or cookieoutlen")); + goto error; + } + *cookieout = ret.cookie_out.cookie_out_val; /* Caller frees. 
*/ + *cookieoutlen = ret.cookie_out.cookie_out_len; + } + + rv = ret.xml; /* caller frees */ + +done: + remoteDriverUnlock(priv); + return rv; + +error: + VIR_FREE(ret.cookie_out.cookie_out_val); + goto done; +} + + +static int +remoteDomainMigratePrepare3(virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri_in, + char **uri_out, + unsigned long flags, + const char *dname, + unsigned long resource, + const char *dom_xml) +{ + int rv = -1; + remote_domain_migrate_prepare3_args args; + remote_domain_migrate_prepare3_ret ret; + struct private_data *priv = dconn->privateData; + + remoteDriverLock(priv); + + memset(&args, 0, sizeof(args)); + memset(&ret, 0, sizeof(ret)); + + args.cookie_in.cookie_in_val = (char *)cookiein; + args.cookie_in.cookie_in_len = cookieinlen; + args.uri_in = uri_in == NULL ? NULL : (char **) &uri_in; + args.flags = flags; + args.dname = dname == NULL ? NULL : (char **) &dname; + args.resource = resource; + args.dom_xml = (char *) dom_xml; + + memset (&ret, 0, sizeof ret); + if (call (dconn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_PREPARE3, + (xdrproc_t) xdr_remote_domain_migrate_prepare3_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_prepare3_ret, (char *) &ret) == -1) + goto done; + + if (ret.cookie_out.cookie_out_len > 0) { + if (!cookieout || !cookieoutlen) { + remoteError(VIR_ERR_INTERNAL_ERROR, "%s", + _("caller ignores cookieout or cookieoutlen")); + goto error; + } + *cookieout = ret.cookie_out.cookie_out_val; /* Caller frees. */ + *cookieoutlen = ret.cookie_out.cookie_out_len; + } + if (ret.uri_out) { + if (!uri_out) { + remoteError(VIR_ERR_INTERNAL_ERROR, "%s", + _("caller ignores uri_out")); + goto error; + } + *uri_out = *ret.uri_out; /* Caller frees. */ + } + + rv = 0; + +done: + remoteDriverUnlock(priv); + return rv; +error: + VIR_FREE(ret.cookie_out.cookie_out_val); + if (ret.uri_out) + VIR_FREE(*ret.uri_out); + goto done; +} + + +static int +remoteDomainMigratePrepareTunnel3(virConnectPtr dconn, + virStreamPtr st, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource, + const char *dom_xml) +{ + struct private_data *priv = dconn->privateData; + struct private_stream_data *privst = NULL; + int rv = -1; + remote_domain_migrate_prepare_tunnel3_args args; + remote_domain_migrate_prepare_tunnel3_ret ret; + + remoteDriverLock(priv); + + memset(&args, 0, sizeof(args)); + memset(&ret, 0, sizeof(ret)); + + if (!(privst = remoteStreamOpen(st, + REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3, + priv->counter))) + goto done; + + st->driver = &remoteStreamDrv; + st->privateData = privst; + + args.cookie_in.cookie_in_val = (char *)cookiein; + args.cookie_in.cookie_in_len = cookieinlen; + args.flags = flags; + args.dname = dname == NULL ? NULL : (char **) &dname; + args.resource = resource; + args.dom_xml = (char *) dom_xml; + + if (call(dconn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3, + (xdrproc_t) xdr_remote_domain_migrate_prepare_tunnel3_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_prepare_tunnel3_ret, (char *) &ret) == -1) { + remoteStreamRelease(st); + goto done; + } + + if (ret.cookie_out.cookie_out_len > 0) { + if (!cookieout || !cookieoutlen) { + remoteError(VIR_ERR_INTERNAL_ERROR, "%s", + _("caller ignores cookieout or cookieoutlen")); + goto error; + } + *cookieout = ret.cookie_out.cookie_out_val; /* Caller frees. 
*/ + *cookieoutlen = ret.cookie_out.cookie_out_len; + } + + rv = 0; + +done: + remoteDriverUnlock(priv); + return rv; + +error: + VIR_FREE(ret.cookie_out.cookie_out_val); + goto done; +} + + +static int +remoteDomainMigratePerform3(virDomainPtr dom, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + int rv = -1; + remote_domain_migrate_perform3_args args; + remote_domain_migrate_perform3_ret ret; + struct private_data *priv = dom->conn->privateData; + + remoteDriverLock(priv); + + memset(&args, 0, sizeof(args)); + memset(&ret, 0, sizeof(ret)); + + make_nonnull_domain(&args.dom, dom); + + args.cookie_in.cookie_in_val = (char *)cookiein; + args.cookie_in.cookie_in_len = cookieinlen; + args.uri = (char *) uri; + args.flags = flags; + args.dname = dname == NULL ? NULL : (char **) &dname; + args.resource = resource; + + if (call (dom->conn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_PERFORM3, + (xdrproc_t) xdr_remote_domain_migrate_perform3_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_perform3_ret, (char *) &ret) == -1) + goto done; + + if (ret.cookie_out.cookie_out_len > 0) { + if (!cookieout || !cookieoutlen) { + remoteError(VIR_ERR_INTERNAL_ERROR, "%s", + _("caller ignores cookieout or cookieoutlen")); + goto error; + } + *cookieout = ret.cookie_out.cookie_out_val; /* Caller frees. */ + *cookieoutlen = ret.cookie_out.cookie_out_len; + } + + rv = 0; + +done: + remoteDriverUnlock(priv); + return rv; + +error: + VIR_FREE(ret.cookie_out.cookie_out_val); + goto done; +} + + +static int +remoteDomainMigrateFinish3(virConnectPtr dconn, + const char *dname, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + int cancelled, + virDomainPtr *ddom) +{ + remote_domain_migrate_finish3_args args; + remote_domain_migrate_finish3_ret ret; + struct private_data *priv = dconn->privateData; + int rv = -1; + + remoteDriverLock(priv); + + *ddom = NULL; + memset(&args, 0, sizeof(args)); + memset(&ret, 0, sizeof(ret)); + + args.cookie_in.cookie_in_val = (char *)cookiein; + args.cookie_in.cookie_in_len = cookieinlen; + args.dname = (char *) dname; + args.uri = (char *) uri; + args.flags = flags; + args.cancelled = cancelled; + + if (call (dconn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_FINISH3, + (xdrproc_t) xdr_remote_domain_migrate_finish3_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_finish3_ret, (char *) &ret) == -1) + goto done; + + *ddom = get_domain(dconn, ret.ddom); + + if (ret.cookie_out.cookie_out_len > 0) { + if (!cookieout || !cookieoutlen) { + remoteError(VIR_ERR_INTERNAL_ERROR, "%s", + _("caller ignores cookieout or cookieoutlen")); + goto error; + } + *cookieout = ret.cookie_out.cookie_out_val; /* Caller frees. 
*/ + *cookieoutlen = ret.cookie_out.cookie_out_len; + ret.cookie_out.cookie_out_val = NULL; + ret.cookie_out.cookie_out_len = 0; + } + + xdr_free ((xdrproc_t) &xdr_remote_domain_migrate_finish3_ret, (char *) &ret); + + rv = 0; + +done: + remoteDriverUnlock(priv); + return rv; + +error: + VIR_FREE(ret.cookie_out.cookie_out_val); + goto done; +} + + +static int +remoteDomainMigrateConfirm3(virDomainPtr domain, + const char *cookiein, + int cookieinlen, + unsigned long flags, + int cancelled) +{ + int rv = -1; + remote_domain_migrate_confirm3_args args; + struct private_data *priv = domain->conn->privateData; + + remoteDriverLock(priv); + + memset(&args, 0, sizeof(args)); + + make_nonnull_domain (&args.dom, domain); + args.cookie_in.cookie_in_len = cookieinlen; + args.cookie_in.cookie_in_val = (char *) cookiein; + args.flags = flags; + args.cancelled = cancelled; + + if (call (domain->conn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_CONFIRM3, + (xdrproc_t) xdr_remote_domain_migrate_confirm3_args, (char *) &args, + (xdrproc_t) xdr_void, (char *) NULL) == -1) + goto done; + + rv = 0; + +done: + remoteDriverUnlock(priv); + return rv; +} + + #include "remote_client_bodies.h" #include "qemu_client_bodies.h" + /*----------------------------------------------------------------------*/ static struct remote_thread_call * @@ -6262,6 +6604,22 @@ remoteDomainEventQueueFlush(int timer ATTRIBUTE_UNUSED, void *opaque) * but if they do then virterror_internal.has been set. */ static virDomainPtr +get_domain (virConnectPtr conn, remote_domain domain) +{ + virDomainPtr dom = NULL; + if (domain) { + dom = virGetDomain (conn, domain->name, BAD_CAST domain->uuid); + if (dom) dom->id = domain->id; + } + return dom; +} + +/* get_nonnull_domain and get_nonnull_network turn an on-wire + * (name, uuid) pair into virDomainPtr or virNetworkPtr object. + * These can return NULL if underlying memory allocations fail, + * but if they do then virterror_internal.has been set. 
+ */ +static virDomainPtr get_nonnull_domain (virConnectPtr conn, remote_nonnull_domain domain) { virDomainPtr dom; @@ -6499,12 +6857,12 @@ static virDriver remote_driver = { remoteQemuDomainMonitorCommand, /* qemuDomainMonitorCommand */ remoteDomainOpenConsole, /* domainOpenConsole */ remoteDomainInjectNMI, /* domainInjectNMI */ - NULL, /* domainMigrateBegin3 */ - NULL, /* domainMigratePrepare3 */ - NULL, /* domainMigratePrepareTunnel3 */ - NULL, /* domainMigratePerform3 */ - NULL, /* domainMigrateFinish3 */ - NULL, /* domainMigrateConfirm3 */ + remoteDomainMigrateBegin3, /* domainMigrateBegin3 */ + remoteDomainMigratePrepare3, /* domainMigratePrepare3 */ + remoteDomainMigratePrepareTunnel3, /* domainMigratePrepareTunnel3 */ + remoteDomainMigratePerform3, /* domainMigratePerform3 */ + remoteDomainMigrateFinish3, /* domainMigrateFinish3 */ + remoteDomainMigrateConfirm3, /* domainMigrateConfirm3 */ }; static virNetworkDriver network_driver = { diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 2cf6022..ec083a4 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -1945,6 +1945,76 @@ struct remote_storage_vol_download_args { unsigned int flags; }; +struct remote_domain_migrate_begin3_args { + remote_nonnull_domain dom; + unsigned hyper flags; + remote_string dname; + unsigned hyper resource; +}; + +struct remote_domain_migrate_begin3_ret { + opaque cookie_out<REMOTE_MIGRATE_COOKIE_MAX>; + remote_nonnull_string xml; +}; + +struct remote_domain_migrate_prepare3_args { + opaque cookie_in<REMOTE_MIGRATE_COOKIE_MAX>; + remote_string uri_in; + unsigned hyper flags; + remote_string dname; + unsigned hyper resource; + remote_nonnull_string dom_xml; +}; + +struct remote_domain_migrate_prepare3_ret { + opaque cookie_out<REMOTE_MIGRATE_COOKIE_MAX>; + remote_string uri_out; +}; + +struct remote_domain_migrate_prepare_tunnel3_args { + opaque cookie_in<REMOTE_MIGRATE_COOKIE_MAX>; + unsigned hyper flags; + remote_string dname; + unsigned hyper resource; + remote_nonnull_string dom_xml; +}; + +struct remote_domain_migrate_prepare_tunnel3_ret { + opaque cookie_out<REMOTE_MIGRATE_COOKIE_MAX>; +}; + +struct remote_domain_migrate_perform3_args { + remote_nonnull_domain dom; + opaque cookie_in<REMOTE_MIGRATE_COOKIE_MAX>; + remote_nonnull_string uri; + unsigned hyper flags; + remote_string dname; + unsigned hyper resource; +}; + +struct remote_domain_migrate_perform3_ret { + opaque cookie_out<REMOTE_MIGRATE_COOKIE_MAX>; +}; + +struct remote_domain_migrate_finish3_args { + remote_nonnull_string dname; + opaque cookie_in<REMOTE_MIGRATE_COOKIE_MAX>; + remote_nonnull_string uri; + unsigned hyper flags; + int cancelled; +}; + +struct remote_domain_migrate_finish3_ret { + remote_domain ddom; + opaque cookie_out<REMOTE_MIGRATE_COOKIE_MAX>; +}; + +struct remote_domain_migrate_confirm3_args { + remote_nonnull_domain dom; + opaque cookie_in<REMOTE_MIGRATE_COOKIE_MAX>; + unsigned hyper flags; + int cancelled; +}; /*----- Protocol. 
-----*/ @@ -2182,7 +2252,14 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_SPEED = 207, REMOTE_PROC_STORAGE_VOL_UPLOAD = 208, REMOTE_PROC_STORAGE_VOL_DOWNLOAD = 209, - REMOTE_PROC_DOMAIN_INJECT_NMI = 210 + REMOTE_PROC_DOMAIN_INJECT_NMI = 210, + + REMOTE_PROC_DOMAIN_MIGRATE_BEGIN3 = 211, + REMOTE_PROC_DOMAIN_MIGRATE_PREPARE3 = 212, + REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL3 = 213, + REMOTE_PROC_DOMAIN_MIGRATE_PERFORM3 = 214, + REMOTE_PROC_DOMAIN_MIGRATE_FINISH3 = 215, + REMOTE_PROC_DOMAIN_MIGRATE_CONFIRM3 = 216 /* * Notice how the entries are grouped in sets of 10 ? diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs index 0507a91..27cd6a4 100644 --- a/src/remote_protocol-structs +++ b/src/remote_protocol-structs @@ -1431,6 +1431,96 @@ struct remote_storage_vol_download_args { uint64_t length; u_int flags; }; +struct remote_domain_migrate_begin3_args { + remote_nonnull_domain dom; + uint64_t flags; + remote_string dname; + uint64_t resource; +}; +struct remote_domain_migrate_begin3_ret { + struct { + u_int cookie_out_len; + char * cookie_out_val; + } cookie_out; + remote_nonnull_string xml; +}; +struct remote_domain_migrate_prepare3_args { + struct { + u_int cookie_in_len; + char * cookie_in_val; + } cookie_in; + remote_string uri_in; + uint64_t flags; + remote_string dname; + uint64_t resource; + remote_nonnull_string dom_xml; +}; +struct remote_domain_migrate_prepare3_ret { + struct { + u_int cookie_out_len; + char * cookie_out_val; + } cookie_out; + remote_string uri_out; +}; +struct remote_domain_migrate_prepare_tunnel3_args { + struct { + u_int cookie_in_len; + char * cookie_in_val; + } cookie_in; + uint64_t flags; + remote_string dname; + uint64_t resource; + remote_nonnull_string dom_xml; +}; +struct remote_domain_migrate_prepare_tunnel3_ret { + struct { + u_int cookie_out_len; + char * cookie_out_val; + } cookie_out; +}; +struct remote_domain_migrate_perform3_args { + remote_nonnull_domain dom; + struct { + u_int cookie_in_len; + char * cookie_in_val; + } cookie_in; + remote_nonnull_string uri; + uint64_t flags; + remote_string dname; + uint64_t resource; +}; +struct remote_domain_migrate_perform3_ret { + struct { + u_int cookie_out_len; + char * cookie_out_val; + } cookie_out; +}; +struct remote_domain_migrate_finish3_args { + remote_nonnull_string dname; + struct { + u_int cookie_in_len; + char * cookie_in_val; + } cookie_in; + remote_nonnull_string uri; + uint64_t flags; + int cancelled; +}; +struct remote_domain_migrate_finish3_ret { + remote_domain ddom; + struct { + u_int cookie_out_len; + char * cookie_out_val; + } cookie_out; +}; +struct remote_domain_migrate_confirm3_args { + remote_nonnull_domain dom; + struct { + u_int cookie_in_len; + char * cookie_in_val; + } cookie_in; + uint64_t flags; + int cancelled; +}; struct remote_message_header { u_int prog; u_int vers; -- 1.7.4.4
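For readers following the client side of this patch: the new remote stubs hand ownership of both the returned XML and any output cookie to the caller. A minimal caller-side sketch, not part of the patch, assuming a virDomainPtr named 'dom' and using VIR_MIGRATE_LIVE purely as an illustrative flag:

    char *cookieout = NULL;
    int cookieoutlen = 0;
    char *xml;

    /* Begin3 returns the domain XML to hand to the destination and may
     * also fill in an opaque cookie blob; the caller must free both. */
    xml = dom->conn->driver->domainMigrateBegin3(dom,
                                                 &cookieout, &cookieoutlen,
                                                 VIR_MIGRATE_LIVE,
                                                 NULL /* dname */,
                                                 0 /* resource */);
    if (xml) {
        /* ... pass xml and the cookie on to the destination's Prepare3 ... */
        VIR_FREE(xml);
        VIR_FREE(cookieout);
    }

This mirrors how the internal migration orchestration in libvirt.c is expected to drive the driver table entries added above; it is only a usage illustration, not code from the series.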

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
* src/remote/remote_protocol.x: Define wire protocol for migration protocol v3 * daemon/remote.c: Server side dispatch * src/remote/remote_driver.c: Client side serialization * src/remote/remote_protocol.c, src/remote/remote_protocol.h, daemon/remote_dispatch_args.h, daemon/remote_dispatch_prototypes.h, daemon/remote_dispatch_ret.h, daemon/remote_dispatch_table.h: Re-generate files * src/remote_protocol-structs: Declare new ABIs
It's a race - who will get their patch in first, you or Matthias? Either way, we'll have a conflict in remote_protocol.x and remote_generator.pl. However, I think it will be pretty trivial for the other guy to resolve the conflict when rebasing. ACK. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

The migration protocol has support for a 'cookie' parameter which is an opaque array of bytes as far as libvirt is concerned. Drivers may use this for passing around arbitrary extra data they might need during migration. The QEMU driver needs todo a few things: - Pass hostname/uuid to allow strict protection against localhost migration attempts - Pass SPICE/VNC server port from the target back to the source to allow seemless relocation of client sessions - Pass lock driver state from source to destination This patch introduces the basic glue for handling cookies but only includes the host/guest UUID & name. * src/libvirt_private.syms: Export virXMLParseStrHelper * src/qemu/qemu_migration.c, src/qemu/qemu_migration.h: Parsing and formatting of migration cookies * src/qemu/qemu_driver.c: Pass in cookie parameters where possible * src/remote/remote_protocol.h, src/remote/remote_protocol.x: Change cookie max length to 16384 bytes --- cfg.mk | 1 + src/libvirt_private.syms | 1 + src/qemu/qemu_driver.c | 20 ++- src/qemu/qemu_migration.c | 351 +++++++++++++++++++++++++++++++++++++++++- src/qemu/qemu_migration.h | 16 ++ src/remote/remote_protocol.x | 2 +- 6 files changed, 377 insertions(+), 14 deletions(-) diff --git a/cfg.mk b/cfg.mk index 9ee0dd0..b142b6d 100644 --- a/cfg.mk +++ b/cfg.mk @@ -80,6 +80,7 @@ VC_LIST_ALWAYS_EXCLUDE_REGEX = ^(HACKING|docs/news\.html\.in)$$ useless_free_options = \ --name=VIR_FREE \ --name=qemuCapsFree \ + --name=qemuMigrationCookieFree \ --name=sexpr_free \ --name=virBitmapFree \ --name=virCPUDefFree \ diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index 81def5c..2abed07 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -1007,6 +1007,7 @@ virStrerror; # xml.h +virXMLParseStrHelper; virXMLPropString; virXPathBoolean; virXPathInt; diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 5a35f9f..faddf18 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -5668,8 +5668,9 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn, } qemuDriverLock(driver); - ret = qemuMigrationPrepareTunnel(driver, dconn, st, - dname, dom_xml); + ret = qemuMigrationPrepareTunnel(driver, dconn, + NULL, 0, NULL, NULL, /* No cookies in v2 */ + st, dname, dom_xml); qemuDriverUnlock(driver); cleanup: @@ -5682,8 +5683,8 @@ cleanup: */ static int ATTRIBUTE_NONNULL (5) qemudDomainMigratePrepare2 (virConnectPtr dconn, - char **cookie ATTRIBUTE_UNUSED, - int *cookielen ATTRIBUTE_UNUSED, + char **cookie, + int *cookielen, const char *uri_in, char **uri_out, unsigned long flags, @@ -5722,6 +5723,8 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn, } ret = qemuMigrationPrepareDirect(driver, dconn, + NULL, 0, /* No input cookies in v2 */ + cookie, cookielen, uri_in, uri_out, dname, dom_xml); @@ -5765,8 +5768,9 @@ qemudDomainMigratePerform (virDomainPtr dom, } ret = qemuMigrationPerform(driver, dom->conn, vm, - uri, flags, - dname, resource); + uri, cookie, cookielen, + NULL, NULL, /* No output cookies in v2 */ + flags, dname, resource); cleanup: qemuDriverUnlock(driver); @@ -5809,7 +5813,9 @@ qemudDomainMigrateFinish2 (virConnectPtr dconn, goto cleanup; } - dom = qemuMigrationFinish(driver, dconn, vm, flags, retcode); + dom = qemuMigrationFinish(driver, dconn, vm, + NULL, 0, NULL, NULL, /* No cookies in v2 */ + flags, retcode); cleanup: if (orig_err) { diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 6738a53..594100c 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -22,6 +22,8 @@ #include 
<config.h> #include <sys/time.h> +#include <gnutls/gnutls.h> +#include <gnutls/x509.h> #include "qemu_migration.h" #include "qemu_monitor.h" @@ -38,11 +40,271 @@ #include "files.h" #include "datatypes.h" #include "fdstream.h" +#include "uuid.h" + #define VIR_FROM_THIS VIR_FROM_QEMU #define timeval_to_ms(tv) (((tv).tv_sec * 1000ull) + ((tv).tv_usec / 1000)) +typedef struct _qemuMigrationCookie qemuMigrationCookie; +typedef qemuMigrationCookie *qemuMigrationCookiePtr; +struct _qemuMigrationCookie { + int flags; + + /* Host properties */ + unsigned char hostuuid[VIR_UUID_BUFLEN]; + char *hostname; + + /* Guest properties */ + unsigned char uuid[VIR_UUID_BUFLEN]; + char *name; +}; + + +static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig) +{ + if (!mig) + return; + + VIR_FREE(mig->hostname); + VIR_FREE(mig->name); + VIR_FREE(mig); +} + + +static qemuMigrationCookiePtr +qemuMigrationCookieNew(virDomainObjPtr dom) +{ + qemuMigrationCookiePtr mig = NULL; + + if (VIR_ALLOC(mig) < 0) + goto no_memory; + + if (!(mig->name = strdup(dom->def->name))) + goto no_memory; + memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN); + + if (!(mig->hostname = virGetHostname(NULL))) + goto no_memory; + if (virGetHostUUID(mig->hostuuid) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Unable to obtain host UUID")); + goto error; + } + + return mig; + +no_memory: + virReportOOMError(); +error: + qemuMigrationCookieFree(mig); + return NULL; +} + + +static void qemuMigrationCookieXMLFormat(virBufferPtr buf, + qemuMigrationCookiePtr mig) +{ + char uuidstr[VIR_UUID_STRING_BUFLEN]; + char hostuuidstr[VIR_UUID_STRING_BUFLEN]; + + virUUIDFormat(mig->uuid, uuidstr); + virUUIDFormat(mig->hostuuid, hostuuidstr); + + virBufferAsprintf(buf, "<qemu-migration>\n"); + virBufferEscapeString(buf, " <name>%s</name>\n", mig->name); + virBufferAsprintf(buf, " <uuid>%s</uuid>\n", uuidstr); + virBufferEscapeString(buf, " <hostname>%s</hostname>\n", mig->hostname); + virBufferAsprintf(buf, " <hostuuid>%s</hostuuid>\n", hostuuidstr); + virBufferAddLit(buf, "</qemu-migration>\n"); +} + + +static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig) +{ + virBuffer buf = VIR_BUFFER_INITIALIZER; + + qemuMigrationCookieXMLFormat(&buf, mig); + + if (virBufferError(&buf)) { + virReportOOMError(); + return NULL; + } + + return virBufferContentAndReset(&buf); +} + + +static int +qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, + xmlXPathContextPtr ctxt, + int flags ATTRIBUTE_UNUSED) +{ + char uuidstr[VIR_UUID_STRING_BUFLEN]; + char *tmp; + + /* We don't store the uuid, name, hostname, or hostuuid + * values. 
We just compare them to local data todo some + * sanity checking on migration operation + */ + + /* Extract domain name */ + if (!(tmp = virXPathString("string(./name[1])", ctxt))) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing name element in migration data")); + goto error; + } + if (STRNEQ(tmp, mig->name)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Incoming cookie data had unexpected name %s vs %s"), + tmp, mig->name); + goto error; + } + VIR_FREE(tmp); + + /* Extract domain uuid */ + tmp = virXPathString("string(./uuid[1])", ctxt); + if (!tmp) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing uuid element in migration data")); + goto error; + } + virUUIDFormat(mig->uuid, uuidstr); + if (STRNEQ(tmp, uuidstr)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Incoming cookie data had unexpected UUID %s vs %s"), + tmp, uuidstr); + } + VIR_FREE(tmp); + + /* Check & forbid "localhost" migration */ + if (!(tmp = virXPathString("string(./hostname[1])", ctxt))) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing hostname element in migration data")); + goto error; + } + if (STREQ(tmp, mig->hostname)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Attempt to migrate guest to the same host %s"), + tmp); + goto error; + } + VIR_FREE(tmp); + + if (!(tmp = virXPathString("string(./hostuuid[1])", ctxt))) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing hostuuid element in migration data")); + goto error; + } + virUUIDFormat(mig->hostuuid, uuidstr); + if (STREQ(tmp, uuidstr)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Attempt to migrate guest to the same host %s"), + tmp); + goto error; + } + VIR_FREE(tmp); + + return 0; + +error: + VIR_FREE(tmp); + return -1; +} + + +static int +qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, + const char *xml, + int flags) +{ + xmlDocPtr doc = NULL; + xmlXPathContextPtr ctxt = NULL; + int ret; + + VIR_DEBUG("xml=%s", NULLSTR(xml)); + + if (!(doc = virXMLParseString(xml, "qemumigration.xml"))) + goto cleanup; + + if ((ctxt = xmlXPathNewContext(doc)) == NULL) { + virReportOOMError(); + goto cleanup; + } + + ctxt->node = xmlDocGetRootElement(doc); + + ret = qemuMigrationCookieXMLParse(mig, ctxt, flags); + +cleanup: + xmlXPathFreeContext(ctxt); + xmlFreeDoc(doc); + + return ret; +} + + +static int +qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, + struct qemud_driver *driver ATTRIBUTE_UNUSED, + virDomainObjPtr dom ATTRIBUTE_UNUSED, + char **cookieout, + int *cookieoutlen, + int flags ATTRIBUTE_UNUSED) +{ + if (!cookieout || !cookieoutlen) { + qemuReportError(VIR_ERR_INVALID_ARG, "%s", + _("missing migration cookie data")); + return -1; + } + + *cookieoutlen = 0; + + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig))) + return -1; + + *cookieoutlen = strlen(*cookieout) + 1; + + VIR_DEBUG("cookielen=%d cookie=%s", *cookieoutlen, *cookieout); + + return 0; +} + + +static qemuMigrationCookiePtr +qemuMigrationEatCookie(virDomainObjPtr dom, + const char *cookiein, + int cookieinlen, + int flags) +{ + qemuMigrationCookiePtr mig = NULL; + + /* Parse & validate incoming cookie (if any) */ + if (cookiein && cookieinlen && + cookiein[cookieinlen-1] != '\0') { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Migration cookie was not NULL terminated")); + goto error; + } + + VIR_DEBUG("cookielen=%d cookie='%s'", cookieinlen, NULLSTR(cookiein)); + + if (!(mig = qemuMigrationCookieNew(dom))) + return NULL; + + if (cookiein && cookieinlen && + qemuMigrationCookieXMLParseStr(mig, + cookiein, + 
flags) < 0) + goto error; + + return mig; + +error: + qemuMigrationCookieFree(mig); + return NULL; +} bool qemuMigrationIsAllowed(virDomainDefPtr def) @@ -245,6 +507,10 @@ cleanup: int qemuMigrationPrepareTunnel(struct qemud_driver *driver, virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, virStreamPtr st, const char *dname, const char *dom_xml) @@ -257,6 +523,7 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, int dataFD[2] = { -1, -1 }; qemuDomainObjPrivatePtr priv = NULL; struct timeval now; + qemuMigrationCookiePtr mig = NULL; if (gettimeofday(&now, NULL) < 0) { virReportSystemError(errno, "%s", @@ -292,6 +559,9 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, def = NULL; priv = vm->privateData; + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + goto cleanup; + if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0) goto cleanup; priv->jobActive = QEMU_JOB_MIGRATION_OUT; @@ -342,6 +612,15 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STARTED, VIR_DOMAIN_EVENT_STARTED_MIGRATED); + + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { + /* We could tear down the whole guest here, but + * cookie data is (so far) non-critical, so that + * seems a little harsh. We'll just warn for now.. + */ + VIR_WARN0("Unable to encode migration cookie"); + } + ret = 0; endjob: @@ -369,7 +648,7 @@ cleanup: virDomainObjUnlock(vm); if (event) qemuDomainEventQueue(driver, event); - qemuDriverUnlock(driver); + qemuMigrationCookieFree(mig); return ret; } @@ -377,6 +656,10 @@ cleanup: int qemuMigrationPrepareDirect(struct qemud_driver *driver, virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, const char *uri_in, char **uri_out, const char *dname, @@ -394,6 +677,7 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver, int internalret; qemuDomainObjPrivatePtr priv = NULL; struct timeval now; + qemuMigrationCookiePtr mig = NULL; if (gettimeofday(&now, NULL) < 0) { virReportSystemError(errno, "%s", @@ -503,6 +787,9 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver, def = NULL; priv = vm->privateData; + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + goto cleanup; + if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0) goto cleanup; priv->jobActive = QEMU_JOB_MIGRATION_OUT; @@ -528,6 +815,14 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver, goto endjob; } + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { + /* We could tear down the whole guest here, but + * cookie data is (so far) non-critical, so that + * seems a little harsh. We'll just warn for now.. 
+ */ + VIR_WARN0("Unable to encode migration cookie"); + } + qemuAuditDomainStart(vm, "migrated", true); event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STARTED, @@ -560,6 +855,7 @@ cleanup: virDomainObjUnlock(vm); if (event) qemuDomainEventQueue(driver, event); + qemuMigrationCookieFree(mig); return ret; } @@ -570,6 +866,10 @@ cleanup: static int doNativeMigrate(struct qemud_driver *driver, virDomainObjPtr vm, const char *uri, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, unsigned int flags, const char *dname ATTRIBUTE_UNUSED, unsigned long resource) @@ -578,6 +878,10 @@ static int doNativeMigrate(struct qemud_driver *driver, xmlURIPtr uribits = NULL; qemuDomainObjPrivatePtr priv = vm->privateData; unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND; + qemuMigrationCookiePtr mig = NULL; + + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + goto cleanup; /* Issue the migrate command. */ if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) { @@ -621,9 +925,13 @@ static int doNativeMigrate(struct qemud_driver *driver, if (qemuMigrationWaitForCompletion(driver, vm) < 0) goto cleanup; + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) + VIR_WARN0("Unable to encode migration cookie"); + ret = 0; cleanup: + qemuMigrationCookieFree(mig); xmlFreeURI(uribits); return ret; } @@ -902,14 +1210,16 @@ static int doNonTunnelMigrate(struct qemud_driver *driver, virDomainPtr ddomain = NULL; int retval = -1; char *uri_out = NULL; + char *cookie = NULL; + int cookielen = 0; int rc; qemuDomainObjEnterRemoteWithDriver(driver, vm); /* NB we don't pass 'uri' into this, since that's the libvirtd * URI in this context - so we let dest pick it */ rc = dconn->driver->domainMigratePrepare2(dconn, - NULL, /* cookie */ - 0, /* cookielen */ + &cookie, + &cookielen, NULL, /* uri */ &uri_out, flags, dname, @@ -934,7 +1244,10 @@ static int doNonTunnelMigrate(struct qemud_driver *driver, goto cleanup; } - if (doNativeMigrate(driver, vm, uri_out, flags, dname, resource) < 0) + if (doNativeMigrate(driver, vm, uri_out, + cookie, cookielen, + NULL, NULL, /* No out cookie with v2 migration */ + flags, dname, resource) < 0) goto finish; retval = 0; @@ -943,13 +1256,14 @@ finish: dname = dname ? 
dname : vm->def->name; qemuDomainObjEnterRemoteWithDriver(driver, vm); ddomain = dconn->driver->domainMigrateFinish2 - (dconn, dname, NULL, 0, uri_out, flags, retval); + (dconn, dname, cookie, cookielen, uri_out, flags, retval); qemuDomainObjExitRemoteWithDriver(driver, vm); if (ddomain) virUnrefDomain(ddomain); cleanup: + VIR_FREE(cookie); return retval; } @@ -1025,6 +1339,10 @@ int qemuMigrationPerform(struct qemud_driver *driver, virConnectPtr conn, virDomainObjPtr vm, const char *uri, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, unsigned long flags, const char *dname, unsigned long resource) @@ -1054,11 +1372,19 @@ int qemuMigrationPerform(struct qemud_driver *driver, } if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) { + if (cookieinlen) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("received unexpected cookie with P2P migration")); + goto endjob; + } + if (doPeer2PeerMigrate(driver, vm, uri, flags, dname, resource) < 0) /* doPeer2PeerMigrate already set the error, so just get out */ goto endjob; } else { - if (doNativeMigrate(driver, vm, uri, flags, dname, resource) < 0) + if (doNativeMigrate(driver, vm, uri, cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, dname, resource) < 0) goto endjob; } @@ -1076,6 +1402,7 @@ int qemuMigrationPerform(struct qemud_driver *driver, virDomainRemoveInactive(&driver->domains, vm); vm = NULL; } + ret = 0; endjob: @@ -1152,6 +1479,10 @@ virDomainPtr qemuMigrationFinish(struct qemud_driver *driver, virConnectPtr dconn, virDomainObjPtr vm, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, unsigned long flags, int retcode) { @@ -1159,6 +1490,7 @@ qemuMigrationFinish(struct qemud_driver *driver, virDomainEventPtr event = NULL; int newVM = 1; qemuDomainObjPrivatePtr priv = NULL; + qemuMigrationCookiePtr mig = NULL; priv = vm->privateData; if (priv->jobActive != QEMU_JOB_MIGRATION_IN) { @@ -1169,6 +1501,9 @@ qemuMigrationFinish(struct qemud_driver *driver, priv->jobActive = QEMU_JOB_NONE; memset(&priv->jobInfo, 0, sizeof(priv->jobInfo)); + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + goto cleanup; + if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0) goto cleanup; @@ -1254,6 +1589,9 @@ qemuMigrationFinish(struct qemud_driver *driver, } } + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) + VIR_WARN0("Unable to encode migration cookie"); + endjob: if (vm && qemuDomainObjEndJob(vm) == 0) @@ -1264,6 +1602,7 @@ cleanup: virDomainObjUnlock(vm); if (event) qemuDomainEventQueue(driver, event); + qemuMigrationCookieFree(mig); return dom; } diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index c0f3aa2..0db176b 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -34,12 +34,20 @@ int qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr int qemuMigrationPrepareTunnel(struct qemud_driver *driver, virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, virStreamPtr st, const char *dname, const char *dom_xml); int qemuMigrationPrepareDirect(struct qemud_driver *driver, virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, const char *uri_in, char **uri_out, const char *dname, @@ -49,6 +57,10 @@ int qemuMigrationPerform(struct qemud_driver *driver, virConnectPtr conn, virDomainObjPtr vm, const char *uri, + const char *cookiein, + int cookieinlen, + char 
**cookieout, + int *cookieoutlen, unsigned long flags, const char *dname, unsigned long resource); @@ -56,6 +68,10 @@ int qemuMigrationPerform(struct qemud_driver *driver, virDomainPtr qemuMigrationFinish(struct qemud_driver *driver, virConnectPtr dconn, virDomainObjPtr vm, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, unsigned long flags, int retcode); diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index ec083a4..134c0e7 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -99,7 +99,7 @@ const REMOTE_VCPUINFO_MAX = 2048; const REMOTE_CPUMAPS_MAX = 16384; /* Upper limit on migrate cookie. */ -const REMOTE_MIGRATE_COOKIE_MAX = 256; +const REMOTE_MIGRATE_COOKIE_MAX = 16384; /* Upper limit on lists of network names. */ const REMOTE_NETWORK_NAME_LIST_MAX = 256; -- 1.7.4.4
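For reference, the cookie baked by qemuMigrationBakeCookie() at this point in the series is a small XML document. With hypothetical name, hostname and UUID values it would look roughly like:

    <qemu-migration>
      <name>guest1</name>
      <uuid>4dea22b3-1d52-d8f3-2516-782e98ab3fa0</uuid>
      <hostname>source.example.com</hostname>
      <hostuuid>8b46a5a1-2f12-4cbd-b7a4-0a7b6a932bbc</hostuuid>
    </qemu-migration>

On the receiving side, qemuMigrationEatCookie() only requires the blob to be NUL-terminated and then compares these fields against local data as a sanity check; in particular the hostname/hostuuid comparison is what rejects an attempted migration to the same host.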

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
The migration protocol has support for a 'cookie' parameter which is an opaque array of bytes as far as libvirt is concerned. Drivers may use this for passing around arbitrary extra data they might need during migration. The QEMU driver needs todo a few things:
s/todo/to do/
- Pass hostname/uuid to allow strict protection against localhost migration attempts - Pass SPICE/VNC server port from the target back to the source to allow seemless relocation of client sessions
s/seemless/seamless/
- Pass lock driver state from source to destination
This patch introduces the basic glue for handling cookies but only includes the host/guest UUID & name.
* src/libvirt_private.syms: Export virXMLParseStrHelper * src/qemu/qemu_migration.c, src/qemu/qemu_migration.h: Parsing and formatting of migration cookies * src/qemu/qemu_driver.c: Pass in cookie parameters where possible * src/remote/remote_protocol.h, src/remote/remote_protocol.x: Change cookie max length to 16384 bytes --- cfg.mk | 1 + src/libvirt_private.syms | 1 + src/qemu/qemu_driver.c | 20 ++- src/qemu/qemu_migration.c | 351 +++++++++++++++++++++++++++++++++++++++++- src/qemu/qemu_migration.h | 16 ++ src/remote/remote_protocol.x | 2 +- 6 files changed, 377 insertions(+), 14 deletions(-)
@@ -342,6 +612,15 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STARTED, VIR_DOMAIN_EVENT_STARTED_MIGRATED); + + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { + /* We could tear down the whole guest here, but + * cookie data is (so far) non-critical, so that + * seems a little harsh. We'll just warn for now..
Double .
Double '.' at the end of the comment.
+ */ + VIR_WARN0("Unable to encode migration cookie");
and more rebase fun. ACK with this squashed in: diff --git i/src/qemu/qemu_migration.c w/src/qemu/qemu_migration.c index 79a8204..0ffdadd 100644 --- i/src/qemu/qemu_migration.c +++ w/src/qemu/qemu_migration.c @@ -145,7 +145,7 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, char *tmp; /* We don't store the uuid, name, hostname, or hostuuid - * values. We just compare them to local data todo some + * values. We just compare them to local data to do some * sanity checking on migration operation */ @@ -616,9 +616,9 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { /* We could tear down the whole guest here, but * cookie data is (so far) non-critical, so that - * seems a little harsh. We'll just warn for now.. + * seems a little harsh. We'll just warn for now. */ - VIR_WARN0("Unable to encode migration cookie"); + VIR_WARN("Unable to encode migration cookie"); } ret = 0; @@ -818,9 +818,9 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver, if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { /* We could tear down the whole guest here, but * cookie data is (so far) non-critical, so that - * seems a little harsh. We'll just warn for now.. + * seems a little harsh. We'll just warn for now. */ - VIR_WARN0("Unable to encode migration cookie"); + VIR_WARN("Unable to encode migration cookie"); } qemuAuditDomainStart(vm, "migrated", true); @@ -926,7 +926,7 @@ static int doNativeMigrate(struct qemud_driver *driver, goto cleanup; if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) - VIR_WARN0("Unable to encode migration cookie"); + VIR_WARN("Unable to encode migration cookie"); ret = 0; @@ -1590,7 +1590,7 @@ qemuMigrationFinish(struct qemud_driver *driver, } if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) - VIR_WARN0("Unable to encode migration cookie"); + VIR_WARN("Unable to encode migration cookie"); endjob: if (vm && -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

Extend the QEMU migration cookie structure to allow information about the destination host graphics setup to be passed by to the source host. This will enable seamless migration of any connected graphics clients * src/qemu/qemu_migration.c: Add graphics info to migration cookies * daemon/libvirtd.c: Always initialize gnutls to enable x509 cert parsing in QEMU --- cfg.mk | 1 + daemon/libvirtd.c | 8 +- src/qemu/qemu_migration.c | 264 +++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 261 insertions(+), 12 deletions(-) diff --git a/cfg.mk b/cfg.mk index b142b6d..aed6a8f 100644 --- a/cfg.mk +++ b/cfg.mk @@ -81,6 +81,7 @@ useless_free_options = \ --name=VIR_FREE \ --name=qemuCapsFree \ --name=qemuMigrationCookieFree \ + --name=qemuMigrationCookieGraphicsFree \ --name=sexpr_free \ --name=virBitmapFree \ --name=virCPUDefFree \ diff --git a/daemon/libvirtd.c b/daemon/libvirtd.c index 42cbe5d..ae04078 100644 --- a/daemon/libvirtd.c +++ b/daemon/libvirtd.c @@ -317,9 +317,6 @@ remoteInitializeGnuTLS (void) { int err; - /* Initialise GnuTLS. */ - gnutls_global_init (); - err = gnutls_certificate_allocate_credentials (&x509_cred); if (err) { VIR_ERROR(_("gnutls_certificate_allocate_credentials: %s"), @@ -3310,6 +3307,11 @@ int main(int argc, char **argv) { goto error; } + /* Initialise GnuTLS. Required even if we don't use TLS + * for libvirtd, because QEMU driver needs to be able to + * parse x590 certificates for seemless migration */ + gnutls_global_init(); + if (!(server = qemudInitialize())) { ret = VIR_DAEMON_ERR_INIT; goto error; diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 594100c..5fc09f7 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -47,6 +47,20 @@ #define timeval_to_ms(tv) (((tv).tv_sec * 1000ull) + ((tv).tv_usec / 1000)) +enum qemuMigrationCookieFlags { + QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << 0), +}; + +typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; +typedef qemuMigrationCookieGraphics *qemuMigrationCookieGraphicsPtr; +struct _qemuMigrationCookieGraphics { + int type; + int port; + int tlsPort; + char *listen; + char *tlsSubject; +}; + typedef struct _qemuMigrationCookie qemuMigrationCookie; typedef qemuMigrationCookie *qemuMigrationCookiePtr; struct _qemuMigrationCookie { @@ -59,20 +73,142 @@ struct _qemuMigrationCookie { /* Guest properties */ unsigned char uuid[VIR_UUID_BUFLEN]; char *name; + + /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */ + qemuMigrationCookieGraphicsPtr graphics; }; +static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap) +{ + if (!grap) + return; + VIR_FREE(grap->listen); + VIR_FREE(grap->tlsSubject); + VIR_FREE(grap); +} + static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig) { if (!mig) return; + if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) + qemuMigrationCookieGraphicsFree(mig->graphics); + VIR_FREE(mig->hostname); VIR_FREE(mig->name); VIR_FREE(mig); } +static char * +qemuDomainExtractTLSSubject(const char *certdir) +{ + char *certfile = NULL; + char *subject = NULL; + char *pemdata = NULL; + gnutls_datum_t pemdatum; + gnutls_x509_crt_t cert; + int ret; + size_t subjectlen; + + if (virAsprintf(&certfile, "%s/server-cert.pem", certdir) < 0) + goto no_memory; + + if (virFileReadAll(certfile, 8192, &pemdata) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("unable to read server cert %s"), certfile); + goto error; + } + + ret = gnutls_x509_crt_init(&cert); + if (ret < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("cannot 
initialize cert object: %s"), + gnutls_strerror(ret)); + goto error; + } + + pemdatum.data = (unsigned char *)pemdata; + pemdatum.size = strlen(pemdata); + + ret = gnutls_x509_crt_import(cert, &pemdatum, GNUTLS_X509_FMT_PEM); + if (ret < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("cannot load cert data from %s: %s"), + certfile, gnutls_strerror(ret)); + goto error; + } + + subjectlen = 1024; + if (VIR_ALLOC_N(subject, subjectlen+1) < 0) + goto no_memory; + + gnutls_x509_crt_get_dn(cert, subject, &subjectlen); + subject[subjectlen] = '\0'; + + VIR_FREE(certfile); + VIR_FREE(pemdata); + + return subject; + +no_memory: + virReportOOMError(); +error: + VIR_FREE(certfile); + VIR_FREE(pemdata); + return NULL; +} + + +static qemuMigrationCookieGraphicsPtr +qemuMigrationCookieGraphicsAlloc(struct qemud_driver *driver, + virDomainGraphicsDefPtr def) +{ + qemuMigrationCookieGraphicsPtr mig = NULL; + const char *listenAddr; + + if (VIR_ALLOC(mig) < 0) + goto no_memory; + + mig->type = def->type; + if (mig->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) { + mig->port = def->data.vnc.port; + listenAddr = def->data.vnc.listenAddr; + if (!listenAddr) + listenAddr = driver->vncListen; + + if (driver->vncTLS && + !(mig->tlsSubject = qemuDomainExtractTLSSubject(driver->vncTLSx509certdir))) + goto error; + } else { + mig->port = def->data.spice.port; + if (driver->spiceTLS) + mig->tlsPort = def->data.spice.tlsPort; + else + mig->tlsPort = -1; + listenAddr = def->data.spice.listenAddr; + if (!listenAddr) + listenAddr = driver->spiceListen; + + if (driver->spiceTLS && + !(mig->tlsSubject = qemuDomainExtractTLSSubject(driver->spiceTLSx509certdir))) + goto error; + } + if (!(mig->listen = strdup(listenAddr))) + goto no_memory; + + return mig; + +no_memory: + virReportOOMError(); +error: + qemuMigrationCookieGraphicsFree(mig); + return NULL; +} + + static qemuMigrationCookiePtr qemuMigrationCookieNew(virDomainObjPtr dom) { @@ -103,6 +239,47 @@ error: } +static int +qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig, + struct qemud_driver *driver, + virDomainObjPtr dom) +{ + if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Migration graphics data already present")); + return -1; + } + + if (dom->def->ngraphics == 1 && + (dom->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC || + dom->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) && + !(mig->graphics = qemuMigrationCookieGraphicsAlloc(driver, dom->def->graphics[0]))) + return -1; + + mig->flags |= QEMU_MIGRATION_COOKIE_GRAPHICS; + + return 0; +} + + +static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf, + qemuMigrationCookieGraphicsPtr grap) +{ + virBufferAsprintf(buf, " <graphics type='%s' port='%d' listen='%s'", + virDomainGraphicsTypeToString(grap->type), + grap->port, grap->listen); + if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) + virBufferAsprintf(buf, " tlsPort='%d'", grap->tlsPort); + if (grap->tlsSubject) { + virBufferAddLit(buf, ">\n"); + virBufferEscapeString(buf, " <cert info='subject' value='%s'/>\n", grap->tlsSubject); + virBufferAddLit(buf, " </graphics>\n"); + } else { + virBufferAddLit(buf, "/>\n"); + } +} + + static void qemuMigrationCookieXMLFormat(virBufferPtr buf, qemuMigrationCookiePtr mig) { @@ -117,6 +294,10 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf, virBufferAsprintf(buf, " <uuid>%s</uuid>\n", uuidstr); virBufferEscapeString(buf, " <hostname>%s</hostname>\n", mig->hostname); virBufferAsprintf(buf, " <hostuuid>%s</hostuuid>\n", 
hostuuidstr); + + if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) + qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics); + virBufferAddLit(buf, "</qemu-migration>\n"); } @@ -136,10 +317,61 @@ static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig) } +static qemuMigrationCookieGraphicsPtr +qemuMigrationCookieGraphicsXMLParse(xmlXPathContextPtr ctxt) +{ + qemuMigrationCookieGraphicsPtr grap; + char *tmp; + + if (VIR_ALLOC(grap) < 0) + goto no_memory; + + if (!(tmp = virXPathString("string(./graphics/@type)", ctxt))) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing type attribute in migration data")); + goto error; + } + if ((grap->type = virDomainGraphicsTypeFromString(tmp)) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("unknown graphics type %s"), tmp); + VIR_FREE(tmp); + goto error; + } + if (virXPathInt("string(./graphics/@port)", ctxt, &grap->port) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing port attribute in migration data")); + goto error; + } + if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) { + if (virXPathInt("string(./graphics/@tlsPort)", ctxt, &grap->tlsPort) < 0) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing tlsPort attribute in migration data")); + goto error; + } + } + if (!(grap->listen = virXPathString("string(./graphics/@listen)", ctxt))) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("missing listen attribute in migration data")); + goto error; + } + /* Optional */ + grap->tlsSubject = virXPathString("string(./graphics/cert[ info='subject']/@value)", ctxt); + + + return grap; + +no_memory: + virReportOOMError(); +error: + qemuMigrationCookieGraphicsFree(grap); + return NULL; +} + + static int qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, xmlXPathContextPtr ctxt, - int flags ATTRIBUTE_UNUSED) + int flags) { char uuidstr[VIR_UUID_STRING_BUFLEN]; char *tmp; @@ -206,6 +438,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, } VIR_FREE(tmp); + if ((flags & QEMU_MIGRATION_COOKIE_GRAPHICS) && + virXPathBoolean("count(./graphics) > 0", ctxt) && + (!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt)))) + goto error; + return 0; error: @@ -247,11 +484,11 @@ cleanup: static int qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, - struct qemud_driver *driver ATTRIBUTE_UNUSED, - virDomainObjPtr dom ATTRIBUTE_UNUSED, + struct qemud_driver *driver, + virDomainObjPtr dom, char **cookieout, int *cookieoutlen, - int flags ATTRIBUTE_UNUSED) + int flags) { if (!cookieout || !cookieoutlen) { qemuReportError(VIR_ERR_INVALID_ARG, "%s", @@ -261,6 +498,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, *cookieoutlen = 0; + if (flags & QEMU_MIGRATION_COOKIE_GRAPHICS && + qemuMigrationCookieAddGraphics(mig, driver, dom) < 0) + return -1; + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig))) return -1; @@ -559,7 +800,8 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, def = NULL; priv = vm->privateData; - if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0) @@ -613,7 +855,8 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver, VIR_DOMAIN_EVENT_STARTED, VIR_DOMAIN_EVENT_STARTED_MIGRATED); - if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) 
{ /* We could tear down the whole guest here, but * cookie data is (so far) non-critical, so that * seems a little harsh. We'll just warn for now.. @@ -787,7 +1030,8 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver, def = NULL; priv = vm->privateData; - if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0) @@ -815,7 +1059,8 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver, goto endjob; } - if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) { + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) { /* We could tear down the whole guest here, but * cookie data is (so far) non-critical, so that * seems a little harsh. We'll just warn for now.. @@ -880,7 +1125,8 @@ static int doNativeMigrate(struct qemud_driver *driver, unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND; qemuMigrationCookiePtr mig = NULL; - if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; /* Issue the migrate command. */ -- 1.7.4.4
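With this patch the cookie grows an optional <graphics> element describing the destination's display setup. Using made-up port numbers, listen address and certificate subject, the SPICE case formatted by qemuMigrationCookieGraphicsXMLFormat() would look roughly like:

    <graphics type='spice' port='5901' listen='192.168.122.1' tlsPort='5902'>
      <cert info='subject' value='C=GB,O=Example,CN=dest.example.com'/>
    </graphics>

The element is only added when the guest has exactly one VNC or SPICE graphics device; tlsPort is emitted for SPICE only, and the <cert> child appears only when a TLS certificate subject could be extracted from the configured x509 certificate directory.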

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
Extend the QEMU migration cookie structure to allow information about the destination host graphics setup to be passed by to the source host. This will enable seamless migration of any connected graphics clients
* src/qemu/qemu_migration.c: Add graphics info to migration cookies * daemon/libvirtd.c: Always initialize gnutls to enable x509 cert parsing in QEMU
+++ b/daemon/libvirtd.c @@ -317,9 +317,6 @@ remoteInitializeGnuTLS (void) { int err;
- /* Initialise GnuTLS. */ - gnutls_global_init (); - err = gnutls_certificate_allocate_credentials (&x509_cred); if (err) { VIR_ERROR(_("gnutls_certificate_allocate_credentials: %s"), @@ -3310,6 +3307,11 @@ int main(int argc, char **argv) { goto error; }
+ /* Initialise GnuTLS. Required even if we don't use TLS + * for libvirtd, because QEMU driver needs to be able to + * parse x590 certificates for seemless migration */
s/seemless/seamless/ ACK with that nit fixed. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

Use the graphics information from the QEMU migration cookie to issue a 'client_migrate_info' monitor command to QEMU. This causes the SPICE client to automatically reconnect to the target host when migration completes * src/qemu/qemu_migration.c: Set data for SPICE client relocation before starting migration on src * src/qemu/qemu_monitor.c, src/qemu/qemu_monitor.h, src/qemu/qemu_monitor_json.c, src/qemu/qemu_monitor_json.h, src/qemu/qemu_monitor_text.c, src/qemu/qemu_monitor_text.h: Add new qemuMonitorGraphicsRelocate() command --- src/qemu/qemu_migration.c | 39 +++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_monitor.c | 31 +++++++++++++++++++++++++++++++ src/qemu/qemu_monitor.h | 6 ++++++ src/qemu/qemu_monitor_json.c | 32 ++++++++++++++++++++++++++++++++ src/qemu/qemu_monitor_json.h | 7 +++++++ src/qemu/qemu_monitor_text.c | 31 +++++++++++++++++++++++++++++++ src/qemu/qemu_monitor_text.h | 7 +++++++ 7 files changed, 153 insertions(+), 0 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 5fc09f7..98305c6 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -740,6 +740,39 @@ cleanup: } +static int +qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver, + virDomainObjPtr vm, + qemuMigrationCookiePtr cookie) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + int ret; + + if (!cookie) + return 0; + + if (!cookie->graphics) + return 0; + + /* QEMU doesnt' support VNC relocation yet, so + * skip it to avoid generating an error + */ + if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) + return 0; + + qemuDomainObjEnterMonitorWithDriver(driver, vm); + ret = qemuMonitorGraphicsRelocate(priv->mon, + cookie->graphics->type, + cookie->hostname, + cookie->graphics->port, + cookie->graphics->tlsPort, + cookie->graphics->tlsSubject); + qemuDomainObjExitMonitorWithDriver(driver, vm); + + return ret; +} + + /* Prepare is the first step, and it runs on the destination host. * * This version starts an empty VM listening on a localhost TCP port, and @@ -1129,6 +1162,9 @@ static int doNativeMigrate(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup; + if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0) + VIR_WARN0("unable to provide data for graphics client relocation"); + /* Issue the migrate command. */ if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) { /* HACK: source host generates bogus URIs, so fix them up */ @@ -1261,6 +1297,9 @@ static int doTunnelMigrate(struct qemud_driver *driver, * 3. start migration on source */ + /* + * XXX need to support migration cookies + */ /* Stage 1. 
setup local support infrastructure */ diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index 9f0f20d..b658dab 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -1620,6 +1620,37 @@ int qemuMonitorMigrateCancel(qemuMonitorPtr mon) return ret; } + +int qemuMonitorGraphicsRelocate(qemuMonitorPtr mon, + int type, + const char *hostname, + int port, + int tlsPort, + const char *tlsSubject) +{ + int ret; + VIR_DEBUG("mon=%p type=%d hostname=%s port=%d tlsPort=%d tlsSubject=%s", + mon, type, hostname, port, tlsPort, NULLSTR(tlsSubject)); + + if (mon->json) + ret = qemuMonitorJSONGraphicsRelocate(mon, + type, + hostname, + port, + tlsPort, + tlsSubject); + else + ret = qemuMonitorTextGraphicsRelocate(mon, + type, + hostname, + port, + tlsPort, + tlsSubject); + + return ret; +} + + int qemuMonitorAddUSBDisk(qemuMonitorPtr mon, const char *path) { diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index b84e230..8f2baff 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -304,6 +304,12 @@ int qemuMonitorMigrateToUnix(qemuMonitorPtr mon, int qemuMonitorMigrateCancel(qemuMonitorPtr mon); +int qemuMonitorGraphicsRelocate(qemuMonitorPtr mon, + int type, + const char *hostname, + int port, + int tlsPort, + const char *tlsSubject); /* XXX disk driver type eg, qcow/etc. * XXX cache mode diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 04ef077..8fc2796 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -1820,6 +1820,38 @@ int qemuMonitorJSONMigrateCancel(qemuMonitorPtr mon) } +int qemuMonitorJSONGraphicsRelocate(qemuMonitorPtr mon, + int type, + const char *hostname, + int port, + int tlsPort, + const char *tlsSubject) +{ + int ret = -1; + virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("client_migrate_info", + "s:protocol", + (type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE ? "spice" : "vnc"), + "s:hostname", hostname, + "i:port", port, + "i:tls-port", tlsPort, + (tlsSubject ? "s:cert-subject" : NULL), + (tlsSubject ? tlsSubject : NULL), + NULL); + virJSONValuePtr reply = NULL; + if (!cmd) + return -1; + + ret = qemuMonitorJSONCommand(mon, cmd, &reply); + + if (ret == 0) + ret = qemuMonitorJSONCheckError(cmd, reply); + + virJSONValueFree(cmd); + virJSONValueFree(reply); + return ret; +} + + int qemuMonitorJSONAddUSBDisk(qemuMonitorPtr mon ATTRIBUTE_UNUSED, const char *path ATTRIBUTE_UNUSED) { diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h index f2dc4d2..98e3bed 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -117,6 +117,13 @@ int qemuMonitorJSONMigrate(qemuMonitorPtr mon, int qemuMonitorJSONMigrateCancel(qemuMonitorPtr mon); +int qemuMonitorJSONGraphicsRelocate(qemuMonitorPtr mon, + int type, + const char *hostname, + int port, + int tlsPort, + const char *tlsSubject); + int qemuMonitorJSONAddUSBDisk(qemuMonitorPtr mon, const char *path); diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c index 1a15d49..1fd2d33 100644 --- a/src/qemu/qemu_monitor_text.c +++ b/src/qemu/qemu_monitor_text.c @@ -1282,6 +1282,37 @@ int qemuMonitorTextMigrateCancel(qemuMonitorPtr mon) return 0; } + +int qemuMonitorTextGraphicsRelocate(qemuMonitorPtr mon, + int type, + const char *hostname, + int port, + int tlsPort, + const char *tlsSubject) +{ + char *cmd; + char *info = NULL; + + if (virAsprintf(&cmd, "client_migrate_info %s %s %d %d %s", + type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE ? "spice" : "vnc", + hostname, port, tlsPort, tlsSubject ? 
tlsSubject : "") < 0) { + virReportOOMError(); + return -1; + } + + if (qemuMonitorHMPCommand(mon, cmd, &info) < 0) { + VIR_FREE(cmd); + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("cannot run monitor command to relocate graphics client")); + return -1; + } + VIR_FREE(cmd); + VIR_FREE(info); + + return 0; +} + + int qemuMonitorTextAddUSBDisk(qemuMonitorPtr mon, const char *path) { diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h index dbae72b..c803c57 100644 --- a/src/qemu/qemu_monitor_text.h +++ b/src/qemu/qemu_monitor_text.h @@ -113,6 +113,13 @@ int qemuMonitorTextMigrate(qemuMonitorPtr mon, int qemuMonitorTextMigrateCancel(qemuMonitorPtr mon); +int qemuMonitorTextGraphicsRelocate(qemuMonitorPtr mon, + int type, + const char *hostname, + int port, + int tlsPort, + const char *tlsSubject); + int qemuMonitorTextAddUSBDisk(qemuMonitorPtr mon, const char *path); -- 1.7.4.4
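To make the effect concrete: on the text monitor, qemuMonitorTextGraphicsRelocate() issues a single HMP command built from the cookie's graphics data, along these lines (hostname, ports and subject are hypothetical):

    client_migrate_info spice dest.example.com 5901 5902 C=GB,O=Example,CN=dest.example.com

The protocol, hostname, port, tls-port and cert-subject values all come from the <graphics> element that the destination baked into the cookie; for VNC the relocate call is skipped entirely, since QEMU cannot relocate VNC clients yet.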

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
Use the graphics information from the QEMU migration cookie to issue a 'client_migrate_info' monitor command to QEMU. This causes the SPICE client to automatically reconnect to the target host when migration completes
* src/qemu/qemu_migration.c: Set data for SPICE client relocation before starting migration on src * src/qemu/qemu_monitor.c, src/qemu/qemu_monitor.h, src/qemu/qemu_monitor_json.c, src/qemu/qemu_monitor_json.h, src/qemu/qemu_monitor_text.c, src/qemu/qemu_monitor_text.h: Add new qemuMonitorGraphicsRelocate() command ---
+static int +qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver, + virDomainObjPtr vm, + qemuMigrationCookiePtr cookie) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; + int ret; + + if (!cookie) + return 0; + + if (!cookie->graphics) + return 0; + + /* QEMU doesnt' support VNC relocation yet, so
s/doesnt'/doesn't/
@@ -1129,6 +1162,9 @@ static int doNativeMigrate(struct qemud_driver *driver, QEMU_MIGRATION_COOKIE_GRAPHICS))) goto cleanup;
+ if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0) + VIR_WARN0("unable to provide data for graphics client relocation");
More rebasing fun.
+{ + int ret; + VIR_DEBUG("mon=%p type=%d hostname=%s port=%d tlsPort=%d tlsSubject=%s", + mon, type, hostname, port, tlsPort, NULLSTR(tlsSubject)); + + if (mon->json) + ret = qemuMonitorJSONGraphicsRelocate(mon, + type, + hostname, + port, + tlsPort, + tlsSubject);
Indentation. ACK with this squashed in: diff --git i/src/qemu/qemu_migration.c w/src/qemu/qemu_migration.c index 6fa21be..99b5116 100644 --- i/src/qemu/qemu_migration.c +++ w/src/qemu/qemu_migration.c @@ -754,7 +754,7 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver, if (!cookie->graphics) return 0; - /* QEMU doesnt' support VNC relocation yet, so + /* QEMU doesn't support VNC relocation yet, so * skip it to avoid generating an error */ if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) @@ -1163,7 +1163,7 @@ static int doNativeMigrate(struct qemud_driver *driver, goto cleanup; if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0) - VIR_WARN0("unable to provide data for graphics client relocation"); + VIR_WARN("unable to provide data for graphics client relocation"); /* Issue the migrate command. */ if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) { diff --git i/src/qemu/qemu_monitor.c w/src/qemu/qemu_monitor.c index 7e0e46e..15c6cab 100644 --- i/src/qemu/qemu_monitor.c +++ w/src/qemu/qemu_monitor.c @@ -1638,7 +1638,7 @@ int qemuMonitorGraphicsRelocate(qemuMonitorPtr mon, hostname, port, tlsPort, - tlsSubject); + tlsSubject); else ret = qemuMonitorTextGraphicsRelocate(mon, type, -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org
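For the JSON monitor path the same data goes out as a QMP command; with hypothetical values again, the command qemuMonitorJSONGraphicsRelocate() constructs is roughly:

    { "execute": "client_migrate_info",
      "arguments": { "protocol": "spice",
                     "hostname": "dest.example.com",
                     "port": 5901,
                     "tls-port": 5902,
                     "cert-subject": "C=GB,O=Example,CN=dest.example.com" } }

The cert-subject argument is only included when the cookie carried a TLS subject.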

To facilitate the introduction of the v3 migration protocol, the doTunnelMigrate method is refactored into two pieces. One piece is intended to mirror the flow of virDomainMigrateVersion2, while the other is the helper for setting up sockets and processing the data. Previously socket setup would be done before the 'prepare' step, so errors could be dealt with immediately, avoiding need to shut off the destination QEMU. In the new split, socket setup is done after the 'prepare' step. This is not a serious problem, since the control flow already requires calling 'finish' to tear down the destination QEMU upon several errors. * src/qemu/qemu_migration.c: --- src/qemu/qemu_migration.c | 159 ++++++++++++++++++++++----------------------- 1 files changed, 78 insertions(+), 81 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 98305c6..fb6e1cf 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1264,44 +1264,31 @@ static int doTunnelSendAll(virStreamPtr st, return 0; } + static int doTunnelMigrate(struct qemud_driver *driver, - virConnectPtr dconn, virDomainObjPtr vm, - const char *dom_xml, - const char *uri, + virStreamPtr st, unsigned long flags, - const char *dname, - unsigned long resource) + unsigned long resource ATTRIBUTE_UNUSED) { qemuDomainObjPrivatePtr priv = vm->privateData; int client_sock = -1; int qemu_sock = -1; struct sockaddr_un sa_qemu, sa_client; socklen_t addrlen; - virDomainPtr ddomain = NULL; - int retval = -1; - virStreamPtr st = NULL; - char *unixfile = NULL; - int internalret; int status; unsigned long long transferred, remaining, total; + char *unixfile = NULL; unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND; + int ret = -1; - /* - * The order of operations is important here to avoid touching - * the source VM until we are very sure we can successfully - * start the migration operation. - * - * 1. setup local support infrastructure (eg sockets) - * 2. setup destination fully - * 3. start migration on source - */ - - /* - * XXX need to support migration cookies - */ + if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) && + !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) { + qemuReportError(VIR_ERR_OPERATION_FAILED, + "%s", _("Source qemu is too old to support tunnelled migration")); + goto cleanup; + } - /* Stage 1. setup local support infrastructure */ if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s", driver->libDir, vm->def->name) < 0) { @@ -1345,36 +1332,6 @@ static int doTunnelMigrate(struct qemud_driver *driver, goto cleanup; } - /* check that this qemu version supports the unix migration */ - - if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) && - !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) { - qemuReportError(VIR_ERR_OPERATION_FAILED, - "%s", _("Source qemu is too old to support tunnelled migration")); - goto cleanup; - } - - - /* Stage 2. 
setup destination fully - * - * Once stage 2 has completed successfully, we *must* call finish - * to cleanup the target whether we succeed or fail - */ - st = virStreamNew(dconn, 0); - if (st == NULL) - /* virStreamNew only fails on OOM, and it reports the error itself */ - goto cleanup; - - qemuDomainObjEnterRemoteWithDriver(driver, vm); - internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st, - flags, dname, - resource, dom_xml); - qemuDomainObjExitRemoteWithDriver(driver, vm); - - if (internalret < 0) - /* domainMigratePrepareTunnel sets the error for us */ - goto cleanup; - /* the domain may have shutdown or crashed while we had the locks dropped * in qemuDomainObjEnterRemoteWithDriver, so check again */ @@ -1386,26 +1343,28 @@ static int doTunnelMigrate(struct qemud_driver *driver, /* 3. start migration on source */ qemuDomainObjEnterMonitorWithDriver(driver, vm); + if (flags & VIR_MIGRATE_NON_SHARED_DISK) background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK; if (flags & VIR_MIGRATE_NON_SHARED_INC) background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC; + if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) { - internalret = qemuMonitorMigrateToUnix(priv->mon, background_flags, + ret = qemuMonitorMigrateToUnix(priv->mon, background_flags, unixfile); - } - else if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) { + } else if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) { const char *args[] = { "nc", "-U", unixfile, NULL }; - internalret = qemuMonitorMigrateToCommand(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND, args); + ret = qemuMonitorMigrateToCommand(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND, args); } else { - internalret = -1; + ret = -1; } qemuDomainObjExitMonitorWithDriver(driver, vm); - if (internalret < 0) { + if (ret < 0) { qemuReportError(VIR_ERR_OPERATION_FAILED, "%s", _("tunnelled migration monitor command failed")); - goto finish; + goto cleanup; } + ret = -1; if (!virDomainObjIsActive(vm)) { qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", @@ -1445,16 +1404,61 @@ static int doTunnelMigrate(struct qemud_driver *driver, goto cancel; } - retval = doTunnelSendAll(st, client_sock); + ret = doTunnelSendAll(st, client_sock); cancel: - if (retval != 0 && virDomainObjIsActive(vm)) { + if (ret != 0 && virDomainObjIsActive(vm)) { qemuDomainObjEnterMonitorWithDriver(driver, vm); qemuMonitorMigrateCancel(priv->mon); qemuDomainObjExitMonitorWithDriver(driver, vm); } -finish: +cleanup: + VIR_FORCE_CLOSE(client_sock); + VIR_FORCE_CLOSE(qemu_sock); + if (unixfile) { + unlink(unixfile); + VIR_FREE(unixfile); + } + + return ret; +} + + +static int doTunnelMigrate2(struct qemud_driver *driver, + virConnectPtr dconn, + virDomainObjPtr vm, + const char *dom_xml, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + virDomainPtr ddomain = NULL; + int retval = -1; + virStreamPtr st = NULL; + int internalret; + + /* + * Tunnelled Migrate Version 2 does not support cookies + * due to missing parameters in the prepareTunnel() API. + */ + + if (!(st = virStreamNew(dconn, 0))) + goto cleanup; + + qemuDomainObjEnterRemoteWithDriver(driver, vm); + internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st, + flags, dname, + resource, dom_xml); + qemuDomainObjExitRemoteWithDriver(driver, vm); + + if (internalret < 0) + /* domainMigratePrepareTunnel sets the error for us */ + goto cleanup; + + retval = doTunnelMigrate(driver, vm, st, flags, resource); + dname = dname ? 
dname : vm->def->name; qemuDomainObjEnterRemoteWithDriver(driver, vm); ddomain = dconn->driver->domainMigrateFinish2 @@ -1462,17 +1466,10 @@ finish: qemuDomainObjExitRemoteWithDriver(driver, vm); cleanup: - VIR_FORCE_CLOSE(client_sock); - VIR_FORCE_CLOSE(qemu_sock); if (ddomain) virUnrefDomain(ddomain); - if (unixfile) { - unlink(unixfile); - VIR_FREE(unixfile); - } - if (st) /* don't call virStreamFree(), because that resets any pending errors */ virUnrefStream(st); @@ -1483,14 +1480,14 @@ cleanup: /* This is essentially a simplified re-impl of * virDomainMigrateVersion2 from libvirt.c, but running in source * libvirtd context, instead of client app context */ -static int doNonTunnelMigrate(struct qemud_driver *driver, - virConnectPtr dconn, - virDomainObjPtr vm, - const char *dom_xml, - const char *uri ATTRIBUTE_UNUSED, - unsigned long flags, - const char *dname, - unsigned long resource) +static int doNonTunnelMigrate2(struct qemud_driver *driver, + virConnectPtr dconn, + virDomainObjPtr vm, + const char *dom_xml, + const char *uri ATTRIBUTE_UNUSED, + unsigned long flags, + const char *dname, + unsigned long resource) { virDomainPtr ddomain = NULL; int retval = -1; @@ -1605,9 +1602,9 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, } if (flags & VIR_MIGRATE_TUNNELLED) - ret = doTunnelMigrate(driver, dconn, vm, dom_xml, uri, flags, dname, resource); + ret = doTunnelMigrate2(driver, dconn, vm, dom_xml, uri, flags, dname, resource); else - ret = doNonTunnelMigrate(driver, dconn, vm, dom_xml, uri, flags, dname, resource); + ret = doNonTunnelMigrate2(driver, dconn, vm, dom_xml, uri, flags, dname, resource); cleanup: VIR_FREE(dom_xml); -- 1.7.4.4

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
To facilitate the introduction of the v3 migration protocol, the doTunnelMigrate method is refactored into two pieces. One piece is intended to mirror the flow of virDomainMigrateVersion2, while the other is the helper for setting up sockets and processing the data.
Previously socket setup would be done before the 'prepare' step, so errors could be dealt with immediately, avoiding need to shut off the destination QEMU. In the new split, socket setup is done after the 'prepare' step. This is not a serious problem, since the control flow already requires calling 'finish' to tear down the destination QEMU upon several errors.
* src/qemu/qemu_migration.c: --- src/qemu/qemu_migration.c | 159 ++++++++++++++++++++++----------------------- 1 files changed, 78 insertions(+), 81 deletions(-)
ACK. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

The v2 migration protocol was accidentally missing out the finish step, when prepare succeeded, but returned an invalid URI * src/libvirt.c: Teardown VM if prepare returns invalid URI --- src/libvirt.c | 16 +++++++++++++--- 1 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/libvirt.c b/src/libvirt.c index c11ca12..70160f9 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -3449,6 +3449,7 @@ virDomainMigrateVersion2 (virDomainPtr domain, int cookielen = 0, ret; virDomainInfo info; virErrorPtr orig_err = NULL; + int cancelled; /* Prepare the migration. * @@ -3494,7 +3495,8 @@ virDomainMigrateVersion2 (virDomainPtr domain, virLibConnError(VIR_ERR_INTERNAL_ERROR, _("domainMigratePrepare2 did not set uri")); virDispatchError(domain->conn); - goto done; + cancelled = 1; + goto finish; } if (uri_out) uri = uri_out; /* Did domainMigratePrepare2 change URI? */ @@ -3510,6 +3512,12 @@ virDomainMigrateVersion2 (virDomainPtr domain, if (ret < 0) orig_err = virSaveLastError(); + /* If Perform returns < 0, then we need to cancel the VM + * startup on the destination + */ + cancelled = ret < 0 ? 1 : 0; + +finish: /* In version 2 of the migration protocol, we pass the * status code from the sender to the destination host, * so it can do any cleanup if the migration failed. @@ -3517,7 +3525,7 @@ virDomainMigrateVersion2 (virDomainPtr domain, dname = dname ? dname : domain->name; VIR_DEBUG("Finish2 %p ret=%d", dconn, ret); ddomain = dconn->driver->domainMigrateFinish2 - (dconn, dname, cookie, cookielen, uri, flags, ret); + (dconn, dname, cookie, cookielen, uri, flags, cancelled); done: if (orig_err) { @@ -3612,7 +3620,8 @@ virDomainMigrateVersion3(virDomainPtr domain, virLibConnError(VIR_ERR_INTERNAL_ERROR, _("domainMigratePrepare3 did not set uri")); virDispatchError(domain->conn); - goto done; + cancelled = 1; + goto finish; } if (uri_out) uri = uri_out; /* Did domainMigratePrepare3 change URI? */ @@ -3641,6 +3650,7 @@ virDomainMigrateVersion3(virDomainPtr domain, */ cancelled = ret < 0 ? 1 : 0; +finish: /* * The status code from the source is passed to the destination. * The dest can cleanup if the source indicated it failed to -- 1.7.4.4

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
The v2 migration protocol was accidentally missing out the finish step, when prepare succeeded, but returned an invalid URI
* src/libvirt.c: Teardown VM if prepare returns invalid URI
ACK. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

Merge the doNonTunnelMigrate2 and doTunnelMigrate2 methods into one doPeer2PeerMigrate2 method, since they are substantially the same. With the introduction of v3 migration, this will be even more important, to avoid massive code duplication. * src/qemu/qemu_migration.c: Merge tunnel & non-tunnel migration --- src/qemu/qemu_migration.c | 201 ++++++++++++++++++++++----------------------- 1 files changed, 98 insertions(+), 103 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index fb6e1cf..9b8fd39 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1425,90 +1425,63 @@ cleanup: } -static int doTunnelMigrate2(struct qemud_driver *driver, - virConnectPtr dconn, - virDomainObjPtr vm, - const char *dom_xml, - const char *uri, - unsigned long flags, - const char *dname, - unsigned long resource) -{ - virDomainPtr ddomain = NULL; - int retval = -1; - virStreamPtr st = NULL; - int internalret; - - /* - * Tunnelled Migrate Version 2 does not support cookies - * due to missing parameters in the prepareTunnel() API. - */ - - if (!(st = virStreamNew(dconn, 0))) - goto cleanup; - - qemuDomainObjEnterRemoteWithDriver(driver, vm); - internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st, - flags, dname, - resource, dom_xml); - qemuDomainObjExitRemoteWithDriver(driver, vm); - - if (internalret < 0) - /* domainMigratePrepareTunnel sets the error for us */ - goto cleanup; - - retval = doTunnelMigrate(driver, vm, st, flags, resource); - - dname = dname ? dname : vm->def->name; - qemuDomainObjEnterRemoteWithDriver(driver, vm); - ddomain = dconn->driver->domainMigrateFinish2 - (dconn, dname, NULL, 0, uri, flags, retval); - qemuDomainObjExitRemoteWithDriver(driver, vm); - -cleanup: - - if (ddomain) - virUnrefDomain(ddomain); - - if (st) - /* don't call virStreamFree(), because that resets any pending errors */ - virUnrefStream(st); - return retval; -} - - -/* This is essentially a simplified re-impl of - * virDomainMigrateVersion2 from libvirt.c, but running in source - * libvirtd context, instead of client app context */ -static int doNonTunnelMigrate2(struct qemud_driver *driver, +/* This is essentially a re-impl of virDomainMigrateVersion2 + * from libvirt.c, but running in source libvirtd context, + * instead of client app context & also adding in tunnel + * handling */ +static int doPeer2PeerMigrate2(struct qemud_driver *driver, + virConnectPtr sconn, virConnectPtr dconn, virDomainObjPtr vm, - const char *dom_xml, - const char *uri ATTRIBUTE_UNUSED, + const char *uri, unsigned long flags, const char *dname, unsigned long resource) { virDomainPtr ddomain = NULL; - int retval = -1; char *uri_out = NULL; char *cookie = NULL; - int cookielen = 0; - int rc; + char *dom_xml = NULL; + int cookielen = 0, ret; + virErrorPtr orig_err = NULL; + int cancelled; + virStreamPtr st = NULL; - qemuDomainObjEnterRemoteWithDriver(driver, vm); - /* NB we don't pass 'uri' into this, since that's the libvirtd - * URI in this context - so we let dest pick it */ - rc = dconn->driver->domainMigratePrepare2(dconn, - &cookie, - &cookielen, - NULL, /* uri */ - &uri_out, - flags, dname, - resource, dom_xml); - qemuDomainObjExitRemoteWithDriver(driver, vm); - if (rc < 0) - /* domainMigratePrepare2 sets the error for us */ + /* In version 2 of the protocol, the prepare step is slightly + * different. We fetch the domain XML of the source domain + * and pass it to Prepare2. 
+ */ + if (!(dom_xml = qemuDomainFormatXML(driver, vm, + VIR_DOMAIN_XML_SECURE | + VIR_DOMAIN_XML_UPDATE_CPU))) + return -1; + + if (vm->state == VIR_DOMAIN_PAUSED) + flags |= VIR_MIGRATE_PAUSED; + + VIR_DEBUG("Prepare2 %p", dconn); + if (flags & VIR_MIGRATE_TUNNELLED) { + /* + * Tunnelled Migrate Version 2 does not support cookies + * due to missing parameters in the prepareTunnel() API. + */ + + if (!(st = virStreamNew(dconn, 0))) + goto cleanup; + + qemuDomainObjEnterRemoteWithDriver(driver, vm); + ret = dconn->driver->domainMigratePrepareTunnel + (dconn, st, flags, dname, resource, dom_xml); + qemuDomainObjExitRemoteWithDriver(driver, vm); + } else { + qemuDomainObjEnterRemoteWithDriver(driver, vm); + ret = dconn->driver->domainMigratePrepare2 + (dconn, &cookie, &cookielen, NULL, &uri_out, + flags, dname, resource, dom_xml); + qemuDomainObjExitRemoteWithDriver(driver, vm); + } + VIR_FREE(dom_xml); + if (ret == -1) goto cleanup; /* the domain may have shutdown or crashed while we had the locks dropped @@ -1520,37 +1493,72 @@ static int doNonTunnelMigrate2(struct qemud_driver *driver, goto cleanup; } - if (uri_out == NULL) { - qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + if (!(flags & VIR_MIGRATE_TUNNELLED) && + (uri_out == NULL)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, _("domainMigratePrepare2 did not set uri")); - goto cleanup; + cancelled = 1; + goto finish; } - if (doNativeMigrate(driver, vm, uri_out, - cookie, cookielen, - NULL, NULL, /* No out cookie with v2 migration */ - flags, dname, resource) < 0) - goto finish; + /* Perform the migration. The driver isn't supposed to return + * until the migration is complete. + */ + VIR_DEBUG("Perform %p", sconn); + if (flags & VIR_MIGRATE_TUNNELLED) + ret = doTunnelMigrate(driver, vm, st, flags, resource); + else + ret = doNativeMigrate(driver, vm, uri_out, + cookie, cookielen, + NULL, NULL, /* No out cookie with v2 migration */ + flags, dname, resource); + + /* Perform failed. Make sure Finish doesn't overwrite the error */ + if (ret < 0) + orig_err = virSaveLastError(); - retval = 0; + /* If Perform returns < 0, then we need to cancel the VM + * startup on the destination + */ + cancelled = ret < 0 ? 1 : 0; finish: + /* In version 2 of the migration protocol, we pass the + * status code from the sender to the destination host, + * so it can do any cleanup if the migration failed. + */ dname = dname ? dname : vm->def->name; + VIR_DEBUG("Finish2 %p ret=%d", dconn, ret); qemuDomainObjEnterRemoteWithDriver(driver, vm); ddomain = dconn->driver->domainMigrateFinish2 - (dconn, dname, cookie, cookielen, uri_out, flags, retval); + (dconn, dname, cookie, cookielen, + uri_out ? 
uri_out : uri, flags, cancelled); qemuDomainObjExitRemoteWithDriver(driver, vm); - if (ddomain) +cleanup: + if (ddomain) { virUnrefDomain(ddomain); + ret = 0; + } else { + ret = -1; + } -cleanup: + if (st) + virUnrefStream(st); + + if (orig_err) { + virSetError(orig_err); + virFreeError(orig_err); + } + VIR_FREE(uri_out); VIR_FREE(cookie); - return retval; + + return ret; } static int doPeer2PeerMigrate(struct qemud_driver *driver, + virConnectPtr sconn, virDomainObjPtr vm, const char *uri, unsigned long flags, @@ -1559,7 +1567,6 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, { int ret = -1; virConnectPtr dconn = NULL; - char *dom_xml; bool p2p; /* the order of operations is important here; we make sure the @@ -1592,22 +1599,10 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, goto cleanup; } - dom_xml = qemuDomainFormatXML(driver, vm, - VIR_DOMAIN_XML_SECURE | - VIR_DOMAIN_XML_UPDATE_CPU); - if (!dom_xml) { - qemuReportError(VIR_ERR_OPERATION_FAILED, - "%s", _("failed to get domain xml")); - goto cleanup; - } - - if (flags & VIR_MIGRATE_TUNNELLED) - ret = doTunnelMigrate2(driver, dconn, vm, dom_xml, uri, flags, dname, resource); - else - ret = doNonTunnelMigrate2(driver, dconn, vm, dom_xml, uri, flags, dname, resource); + ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm, + uri, flags, dname, resource); cleanup: - VIR_FREE(dom_xml); /* don't call virConnectClose(), because that resets any pending errors */ qemuDomainObjEnterRemoteWithDriver(driver, vm); virUnrefConnect(dconn); @@ -1660,7 +1655,7 @@ int qemuMigrationPerform(struct qemud_driver *driver, goto endjob; } - if (doPeer2PeerMigrate(driver, vm, uri, flags, dname, resource) < 0) + if (doPeer2PeerMigrate(driver, conn, vm, uri, flags, dname, resource) < 0) /* doPeer2PeerMigrate already set the error, so just get out */ goto endjob; } else { -- 1.7.4.4

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
Merge the doNonTunnelMigrate2 and doTunnelMigrate2 methods into one doPeer2PeerMigrate2 method, since they are substantially the same. With the introduction of v3 migration, this will be even more important, to avoid massive code duplication.
* src/qemu/qemu_migration.c: Merge tunnel & non-tunnel migration --- src/qemu/qemu_migration.c | 201 ++++++++++++++++++++++----------------------- 1 files changed, 98 insertions(+), 103 deletions(-)
ACK. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

Implement the v3 migration protocol, which has two extra steps, 'begin' on the source host and 'confirm' on the source host. All other methods also gain both input and output cookies to allow bi-directional data passing at all stages. The QEMU peer2peer migration method gains another impl to provide the v3 migration. This finally allows migration cookies to work with tunnelled migration, which is required for Spice seemless migration & the lock manager transfer * src/qemu/qemu_driver.c: Wire up migrate v3 APIs * src/qemu/qemu_migration.c, src/qemu/qemu_migration.h: Add begin & confirm methods, and peer2peer impl of v3 --- src/qemu/qemu_driver.c | 318 +++++++++++++++++++++++++++++++++++++++- src/qemu/qemu_migration.c | 353 +++++++++++++++++++++++++++++++++++++++++++-- src/qemu/qemu_migration.h | 17 ++- 3 files changed, 664 insertions(+), 24 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index faddf18..0b15437 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -870,6 +870,7 @@ qemudSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature) { switch (feature) { case VIR_DRV_FEATURE_MIGRATION_V2: + case VIR_DRV_FEATURE_MIGRATION_V3: case VIR_DRV_FEATURE_MIGRATION_P2P: return 1; default: @@ -5633,7 +5634,9 @@ qemuDomainEventDeregisterAny(virConnectPtr conn, } -/* Migration support. */ +/******************************************************************* + * Migration Protocol Version 2 + *******************************************************************/ /* Prepare is the first step, and it runs on the destination host. * @@ -5651,6 +5654,15 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn, struct qemud_driver *driver = dconn->privateData; int ret = -1; + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + if (!dom_xml) { qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("no domain XML passed")); @@ -5770,7 +5782,7 @@ qemudDomainMigratePerform (virDomainPtr dom, ret = qemuMigrationPerform(driver, dom->conn, vm, uri, cookie, cookielen, NULL, NULL, /* No output cookies in v2 */ - flags, dname, resource); + flags, dname, resource, true); cleanup: qemuDriverUnlock(driver); @@ -5827,6 +5839,296 @@ cleanup: } +/******************************************************************* + * Migration Protocol Version 3 + *******************************************************************/ + +static char * +qemuDomainMigrateBegin3(virDomainPtr domain, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname ATTRIBUTE_UNUSED, + unsigned long resource ATTRIBUTE_UNUSED) +{ + struct qemud_driver *driver = domain->conn->privateData; + virDomainObjPtr vm; + char *xml = NULL; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, NULL); + + qemuDriverLock(driver); + vm = virDomainFindByUUID(&driver->domains, domain->uuid); + if (!vm) { + char uuidstr[VIR_UUID_STRING_BUFLEN]; + virUUIDFormat(domain->uuid, uuidstr); + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching uuid '%s'"), uuidstr); + goto cleanup; + } + + xml = qemuMigrationBegin(driver, vm, + cookieout, cookieoutlen); + +cleanup: + qemuDriverUnlock(driver); + return xml; +} + +static int 
+qemuDomainMigratePrepare3(virConnectPtr dconn, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri_in, + char **uri_out, + unsigned long flags, + const char *dname, + unsigned long resource ATTRIBUTE_UNUSED, + const char *dom_xml) +{ + struct qemud_driver *driver = dconn->privateData; + int ret = -1; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + + *uri_out = NULL; + + qemuDriverLock(driver); + if (flags & VIR_MIGRATE_TUNNELLED) { + /* this is a logical error; we never should have gotten here with + * VIR_MIGRATE_TUNNELLED set + */ + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("Tunnelled migration requested but invalid RPC method called")); + goto cleanup; + } + + if (!dom_xml) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("no domain XML passed")); + goto cleanup; + } + + ret = qemuMigrationPrepareDirect(driver, dconn, + cookiein, cookieinlen, + cookieout, cookieoutlen, + uri_in, uri_out, + dname, dom_xml); + +cleanup: + qemuDriverUnlock(driver); + return ret; +} + + +static int +qemuDomainMigratePrepareTunnel3(virConnectPtr dconn, + virStreamPtr st, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource ATTRIBUTE_UNUSED, + const char *dom_xml) +{ + struct qemud_driver *driver = dconn->privateData; + int ret = -1; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + + if (!dom_xml) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("no domain XML passed")); + goto cleanup; + } + if (!(flags & VIR_MIGRATE_TUNNELLED)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("PrepareTunnel called but no TUNNELLED flag set")); + goto cleanup; + } + if (st == NULL) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("tunnelled migration requested but NULL stream passed")); + goto cleanup; + } + + qemuDriverLock(driver); + ret = qemuMigrationPrepareTunnel(driver, dconn, + cookiein, cookieinlen, + cookieout, cookieoutlen, + st, dname, dom_xml); + qemuDriverUnlock(driver); + +cleanup: + return ret; +} + + +static int +qemuDomainMigratePerform3(virDomainPtr dom, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + struct qemud_driver *driver = dom->conn->privateData; + virDomainObjPtr vm; + int ret = -1; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + + qemuDriverLock(driver); + vm = virDomainFindByUUID(&driver->domains, dom->uuid); + if (!vm) { + char uuidstr[VIR_UUID_STRING_BUFLEN]; + virUUIDFormat(dom->uuid, uuidstr); + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching uuid '%s'"), uuidstr); + goto cleanup; + } + + ret = qemuMigrationPerform(driver, dom->conn, vm, + uri, cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, dname, resource, false); + +cleanup: + qemuDriverUnlock(driver); + return ret; +} + + +static int 
+qemuDomainMigrateFinish3(virConnectPtr dconn, + const char *dname, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + const char *uri ATTRIBUTE_UNUSED, + unsigned long flags, + int cancelled, + virDomainPtr *newdom) +{ + struct qemud_driver *driver = dconn->privateData; + virDomainObjPtr vm; + virErrorPtr orig_err; + int ret = -1; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + + /* Migration failed. Save the current error so nothing squashes it */ + orig_err = virSaveLastError(); + + qemuDriverLock(driver); + vm = virDomainFindByName(&driver->domains, dname); + if (!vm) { + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching name '%s'"), dname); + goto cleanup; + } + + *newdom = qemuMigrationFinish(driver, dconn, vm, + cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, cancelled); + + ret = 0; + +cleanup: + if (orig_err) { + virSetError(orig_err); + virFreeError(orig_err); + } + qemuDriverUnlock(driver); + return ret; +} + +static int +qemuDomainMigrateConfirm3(virDomainPtr domain, + const char *cookiein, + int cookieinlen, + unsigned long flags, + int cancelled) +{ + struct qemud_driver *driver = domain->conn->privateData; + virDomainObjPtr vm; + int ret = -1; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + + /* Migration failed. Save the current error so nothing squashes it */ + + qemuDriverLock(driver); + vm = virDomainFindByUUID(&driver->domains, domain->uuid); + if (!vm) { + char uuidstr[VIR_UUID_STRING_BUFLEN]; + virUUIDFormat(domain->uuid, uuidstr); + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching uuid '%s'"), uuidstr); + goto cleanup; + } + + ret = qemuMigrationConfirm(driver, domain->conn, vm, + cookiein, cookieinlen, + flags, cancelled, false); + +cleanup: + qemuDriverUnlock(driver); + return ret; +} + + static int qemudNodeDeviceGetPciInfo (virNodeDevicePtr dev, unsigned *domain, @@ -7250,12 +7552,12 @@ static virDriver qemuDriver = { qemuDomainMonitorCommand, /* qemuDomainMonitorCommand */ qemuDomainOpenConsole, /* domainOpenConsole */ qemuDomainInjectNMI, /* domainInjectNMI */ - NULL, /* domainMigrateBegin3 */ - NULL, /* domainMigratePrepare3 */ - NULL, /* domainMigratePrepareTunnel3 */ - NULL, /* domainMigratePerform3 */ - NULL, /* domainMigrateFinish3 */ - NULL, /* domainMigrateConfirm3 */ + qemuDomainMigrateBegin3, /* domainMigrateBegin3 */ + qemuDomainMigratePrepare3, /* domainMigratePrepare3 */ + qemuDomainMigratePrepareTunnel3, /* domainMigratePrepareTunnel3 */ + qemuDomainMigratePerform3, /* domainMigratePerform3 */ + qemuDomainMigrateFinish3, /* domainMigrateFinish3 */ + qemuDomainMigrateConfirm3, /* domainMigrateConfirm3 */ }; diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 9b8fd39..6cb529f 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -773,6 +773,42 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver, } +char *qemuMigrationBegin(struct qemud_driver *driver, + virDomainObjPtr vm, + char **cookieout, + int *cookieoutlen) +{ + char *rv = NULL; + qemuMigrationCookiePtr mig = NULL; + + if (!virDomainObjIsActive(vm)) { + 
qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto cleanup; + } + + if (!qemuMigrationIsAllowed(vm->def)) + goto cleanup; + + if (!(mig = qemuMigrationEatCookie(vm, NULL, 0, 0))) + goto cleanup; + + if (qemuMigrationBakeCookie(mig, driver, vm, + cookieout, cookieoutlen, + 0) < 0) + goto cleanup; + + rv = qemuDomainFormatXML(driver, vm, + VIR_DOMAIN_XML_SECURE | + VIR_DOMAIN_XML_UPDATE_CPU); + +cleanup: + virDomainObjUnlock(vm); + qemuMigrationCookieFree(mig); + return rv; +} + + /* Prepare is the first step, and it runs on the destination host. * * This version starts an empty VM listening on a localhost TCP port, and @@ -1268,6 +1304,10 @@ static int doTunnelSendAll(virStreamPtr st, static int doTunnelMigrate(struct qemud_driver *driver, virDomainObjPtr vm, virStreamPtr st, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, unsigned long flags, unsigned long resource ATTRIBUTE_UNUSED) { @@ -1281,6 +1321,7 @@ static int doTunnelMigrate(struct qemud_driver *driver, char *unixfile = NULL; unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND; int ret = -1; + qemuMigrationCookiePtr mig = NULL; if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) && !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) { @@ -1341,6 +1382,13 @@ static int doTunnelMigrate(struct qemud_driver *driver, goto cleanup; } + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_GRAPHICS))) + goto cleanup; + + if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0) + VIR_WARN0("unable to provide data for graphics client relocation"); + /* 3. start migration on source */ qemuDomainObjEnterMonitorWithDriver(driver, vm); @@ -1406,6 +1454,10 @@ static int doTunnelMigrate(struct qemud_driver *driver, ret = doTunnelSendAll(st, client_sock); + if (ret == 0 && + qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) + VIR_WARN0("Unable to encode migration cookie"); + cancel: if (ret != 0 && virDomainObjIsActive(vm)) { qemuDomainObjEnterMonitorWithDriver(driver, vm); @@ -1414,6 +1466,7 @@ cancel: } cleanup: + qemuMigrationCookieFree(mig); VIR_FORCE_CLOSE(client_sock); VIR_FORCE_CLOSE(qemu_sock); if (unixfile) { @@ -1506,7 +1559,9 @@ static int doPeer2PeerMigrate2(struct qemud_driver *driver, */ VIR_DEBUG("Perform %p", sconn); if (flags & VIR_MIGRATE_TUNNELLED) - ret = doTunnelMigrate(driver, vm, st, flags, resource); + ret = doTunnelMigrate(driver, vm, st, + NULL, 0, NULL, NULL, + flags, resource); else ret = doNativeMigrate(driver, vm, uri_out, cookie, cookielen, @@ -1557,6 +1612,180 @@ cleanup: } +/* This is essentially a re-impl of virDomainMigrateVersion3 + * from libvirt.c, but running in source libvirtd context, + * instead of client app context & also adding in tunnel + * handling */ +static int doPeer2PeerMigrate3(struct qemud_driver *driver, + virConnectPtr sconn, + virConnectPtr dconn, + virDomainObjPtr vm, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + virDomainPtr ddomain = NULL; + char *uri_out = NULL; + char *cookiein = NULL; + char *cookieout = NULL; + char *dom_xml = NULL; + int cookieinlen = 0; + int cookieoutlen = 0; + int ret = -1; + virErrorPtr orig_err = NULL; + int cancelled; + virStreamPtr st = NULL; + + VIR_DEBUG("Begin3 %p", sconn); + dom_xml = qemuMigrationBegin(driver, vm, + &cookieout, &cookieoutlen); + if (!dom_xml) + goto cleanup; + + if (vm->state == VIR_DOMAIN_PAUSED) + flags |= VIR_MIGRATE_PAUSED; + + 
VIR_DEBUG("Prepare3 %p", dconn); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + if (flags & VIR_MIGRATE_TUNNELLED) { + /* + * Tunnelled Migrate Version 2 does not support cookies + * due to missing parameters in the prepareTunnel() API. + */ + + if (!(st = virStreamNew(dconn, 0))) + goto cleanup; + + qemuDomainObjEnterRemoteWithDriver(driver, vm); + ret = dconn->driver->domainMigratePrepareTunnel3 + (dconn, st, cookiein, cookieinlen, + &cookieout, &cookieoutlen, + flags, dname, resource, dom_xml); + qemuDomainObjExitRemoteWithDriver(driver, vm); + } else { + qemuDomainObjEnterRemoteWithDriver(driver, vm); + ret = dconn->driver->domainMigratePrepare3 + (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen, + NULL, &uri_out, flags, dname, resource, dom_xml); + qemuDomainObjExitRemoteWithDriver(driver, vm); + } + VIR_FREE(dom_xml); + if (ret == -1) + goto cleanup; + + if (!(flags & VIR_MIGRATE_TUNNELLED) && + (uri_out == NULL)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("domainMigratePrepare3 did not set uri")); + cancelled = 1; + goto finish; + } + + /* Perform the migration. The driver isn't supposed to return + * until the migration is complete. The src VM should remain + * running, but in paused state until the destination can + * confirm migration completion. + */ + VIR_DEBUG("Perform3 %p uri=%s", sconn, uri_out); + VIR_FREE(cookiein); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + if (flags & VIR_MIGRATE_TUNNELLED) + ret = doTunnelMigrate(driver, vm, st, + cookiein, cookieinlen, + &cookieout, &cookieoutlen, + flags, resource); + else + ret = doNativeMigrate(driver, vm, uri_out, + cookiein, cookieinlen, + &cookieout, &cookieoutlen, + flags, dname, resource); + + /* Perform failed. Make sure Finish doesn't overwrite the error */ + if (ret < 0) + orig_err = virSaveLastError(); + + /* If Perform returns < 0, then we need to cancel the VM + * startup on the destination + */ + cancelled = ret < 0 ? 1 : 0; + +finish: + /* + * The status code from the source is passed to the destination. + * The dest can cleanup in the source indicated it failed to + * send all migration data. Returns NULL for ddomain if + * the dest was unable to complete migration. + */ + VIR_DEBUG("Finish3 %p ret=%d", dconn, ret); + VIR_FREE(cookiein); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + dname = dname ? dname : vm->def->name; + qemuDomainObjEnterRemoteWithDriver(driver, vm); + ret = dconn->driver->domainMigrateFinish3 + (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen, + uri_out ? uri_out : uri, flags, cancelled, &ddomain); + qemuDomainObjExitRemoteWithDriver(driver, vm); + + /* If ret is 0 then 'ddomain' indicates whether the VM is + * running on the dest. If not running, we can restart + * the source. If ret is -1, we can't be sure what happened + * to the VM on the dest, thus the only safe option is to + * kill the VM on the source, even though that may leave + * no VM at all on either host. + */ + cancelled = ret == 0 && ddomain == NULL ? 
1 : 0; + + /* + * If cancelled, then src VM will be restarted, else + * it will be killed + */ + VIR_DEBUG("Confirm3 %p ret=%d vm=%p", sconn, ret, vm); + VIR_FREE(cookiein); + cookiein = cookieout; + cookieinlen = cookieoutlen; + cookieout = NULL; + cookieoutlen = 0; + ret = qemuMigrationConfirm(driver, sconn, vm, + cookiein, cookieinlen, + flags, cancelled, true); + /* If Confirm3 returns -1, there's nothing more we can + * do, but fortunately worst case is that there is a + * domain left in 'paused' state on source. + */ + + cleanup: + if (ddomain) { + virUnrefDomain(ddomain); + ret = 0; + } else { + ret = -1; + } + + if (st) + virUnrefStream(st); + + if (orig_err) { + virSetError(orig_err); + virFreeError(orig_err); + } + VIR_FREE(uri_out); + VIR_FREE(cookiein); + VIR_FREE(cookieout); + + return ret; +} + + static int doPeer2PeerMigrate(struct qemud_driver *driver, virConnectPtr sconn, virDomainObjPtr vm, @@ -1568,6 +1797,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, int ret = -1; virConnectPtr dconn = NULL; bool p2p; + bool v3; /* the order of operations is important here; we make sure the * destination side is completely setup before we touch the source @@ -1585,7 +1815,10 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, qemuDomainObjEnterRemoteWithDriver(driver, vm); p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, VIR_DRV_FEATURE_MIGRATION_P2P); + v3 = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, + VIR_DRV_FEATURE_MIGRATION_V3); qemuDomainObjExitRemoteWithDriver(driver, vm); + if (!p2p) { qemuReportError(VIR_ERR_OPERATION_FAILED, "%s", _("Destination libvirt does not support peer-to-peer migration protocol")); @@ -1599,8 +1832,12 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, goto cleanup; } - ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm, - uri, flags, dname, resource); + if (v3) + ret = doPeer2PeerMigrate3(driver, sconn, dconn, vm, + uri, flags, dname, resource); + else + ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm, + uri, flags, dname, resource); cleanup: /* don't call virConnectClose(), because that resets any pending errors */ @@ -1622,7 +1859,8 @@ int qemuMigrationPerform(struct qemud_driver *driver, int *cookieoutlen, unsigned long flags, const char *dname, - unsigned long resource) + unsigned long resource, + bool killOnFinish) { virDomainEventPtr event = NULL; int ret = -1; @@ -1666,18 +1904,20 @@ int qemuMigrationPerform(struct qemud_driver *driver, } /* Clean up the source domain. 
*/ - qemuProcessStop(driver, vm, 1); - qemuAuditDomainStop(vm, "migrated"); - resume = 0; + if (killOnFinish) { + qemuProcessStop(driver, vm, 1); + qemuAuditDomainStop(vm, "migrated"); + resume = 0; - event = virDomainEventNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_MIGRATED); - if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) { - virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm); - if (qemuDomainObjEndJob(vm) > 0) - virDomainRemoveInactive(&driver->domains, vm); - vm = NULL; + event = virDomainEventNewFromObj(vm, + VIR_DOMAIN_EVENT_STOPPED, + VIR_DOMAIN_EVENT_STOPPED_MIGRATED); + if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) { + virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm); + if (qemuDomainObjEndJob(vm) > 0) + virDomainRemoveInactive(&driver->domains, vm); + vm = NULL; + } } ret = 0; @@ -1883,6 +2123,89 @@ cleanup: return dom; } + +int qemuMigrationConfirm(struct qemud_driver *driver, + virConnectPtr conn, + virDomainObjPtr vm, + const char *cookiein, + int cookieinlen, + unsigned int flags, + int retcode, + bool skipJob) +{ + qemuMigrationCookiePtr mig; + virDomainEventPtr event = NULL; + int rv = -1; + + if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0))) + return -1; + + if (!skipJob && + qemuDomainObjBeginJobWithDriver(driver, vm) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest unexpectedly quit")); + goto endjob; + } + + /* Did the migration go as planned? If yes, kill off the + * domain object, but if no, resume CPUs + */ + if (retcode == 0) { + qemuProcessStop(driver, vm, 1); + qemuAuditDomainStop(vm, "migrated"); + + event = virDomainEventNewFromObj(vm, + VIR_DOMAIN_EVENT_STOPPED, + VIR_DOMAIN_EVENT_STOPPED_MIGRATED); + if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) { + virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm); + if (qemuDomainObjEndJob(vm) > 0) + virDomainRemoveInactive(&driver->domains, vm); + vm = NULL; + } + } else { + + /* run 'cont' on the destination, which allows migration on qemu + * >= 0.10.6 to work properly. This isn't strictly necessary on + * older qemu's, but it also doesn't hurt anything there + */ + if (qemuProcessStartCPUs(driver, vm, conn) < 0) { + if (virGetLastError() == NULL) + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("resume operation failed")); + goto endjob; + } + + event = virDomainEventNewFromObj(vm, + VIR_DOMAIN_EVENT_RESUMED, + VIR_DOMAIN_EVENT_RESUMED_MIGRATED); + if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) { + VIR_WARN("Failed to save status on vm %s", vm->def->name); + goto endjob; + } + } + + qemuMigrationCookieFree(mig); + rv = 0; + +endjob: + if (vm && + !skipJob && + qemuDomainObjEndJob(vm) == 0) + vm = NULL; + +cleanup: + if (vm) + virDomainObjUnlock(vm); + if (event) + qemuDomainEventQueue(driver, event); + return rv; +} + + /* Helper function called while driver lock is held and vm is active. 
*/ int qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm, diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 0db176b..f96a0b8 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -32,6 +32,11 @@ int qemuMigrationSetOffline(struct qemud_driver *driver, int qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm); +char *qemuMigrationBegin(struct qemud_driver *driver, + virDomainObjPtr vm, + char **cookieout, + int *cookieoutlen); + int qemuMigrationPrepareTunnel(struct qemud_driver *driver, virConnectPtr dconn, const char *cookiein, @@ -63,7 +68,8 @@ int qemuMigrationPerform(struct qemud_driver *driver, int *cookieoutlen, unsigned long flags, const char *dname, - unsigned long resource); + unsigned long resource, + bool killOnFinish); virDomainPtr qemuMigrationFinish(struct qemud_driver *driver, virConnectPtr dconn, @@ -75,6 +81,15 @@ virDomainPtr qemuMigrationFinish(struct qemud_driver *driver, unsigned long flags, int retcode); +int qemuMigrationConfirm(struct qemud_driver *driver, + virConnectPtr conn, + virDomainObjPtr vm, + const char *cookiein, + int cookieinlen, + unsigned int flags, + int retcode, + bool skipJob); + int qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm, int fd, off_t offset, const char *path, -- 1.7.4.4
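[Editor's note] One pattern worth calling out from the diff above is the cookie plumbing: the five v3 steps are chained by turning each step's output cookie into the next step's input cookie and then resetting the output slot, exactly as doPeer2PeerMigrate3() does between Begin3/Prepare3/Perform3/Finish3/Confirm3. A stripped-down sketch of that hand-off, with a hypothetical step() standing in for the real RPC calls:

    #include <stdlib.h>
    #include <string.h>

    /* step() is a stand-in for any of the v3 calls; it consumes the
     * peer's cookie and produces one of its own. Purely illustrative. */
    static int step(const char *cookiein, int cookieinlen,
                    char **cookieout, int *cookieoutlen)
    {
        (void)cookiein; (void)cookieinlen;
        *cookieout = strdup("driver-specific-data");
        *cookieoutlen = *cookieout ? (int)strlen(*cookieout) + 1 : 0;
        return *cookieout ? 0 : -1;
    }

    int main(void)
    {
        char *cookiein = NULL, *cookieout = NULL;
        int cookieinlen = 0, cookieoutlen = 0;
        int i;

        for (i = 0; i < 5; i++) {      /* Begin, Prepare, Perform, Finish, Confirm */
            free(cookiein);            /* previous input is no longer needed */
            cookiein = cookieout;      /* last step's output feeds the next step */
            cookieinlen = cookieoutlen;
            cookieout = NULL;          /* reset the output slot before the call */
            cookieoutlen = 0;
            if (step(cookiein, cookieinlen, &cookieout, &cookieoutlen) < 0)
                break;
        }
        free(cookiein);
        free(cookieout);
        return 0;
    }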

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
Implement the v3 migration protocol, which has two extra steps, 'begin' on the source host and 'confirm' on the source host. All other methods also gain both input and output cookies to allow bi-directional data passing at all stages.
The QEMU peer2peer migration method gains another impl to provide the v3 migration. This finally allows migration cookies to work with tunnelled migration, which is required for Spice seemless migration & the lock manager transfer
s/seemless/seamless/
* src/qemu/qemu_driver.c: Wire up migrate v3 APIs * src/qemu/qemu_migration.c, src/qemu/qemu_migration.h: Add begin & confirm methods, and peer2peer impl of v3 --- src/qemu/qemu_driver.c | 318 +++++++++++++++++++++++++++++++++++++++- src/qemu/qemu_migration.c | 353 +++++++++++++++++++++++++++++++++++++++++++-- src/qemu/qemu_migration.h | 17 ++- 3 files changed, 664 insertions(+), 24 deletions(-)
+static int +qemuDomainMigrateConfirm3(virDomainPtr domain, + const char *cookiein, + int cookieinlen, + unsigned long flags, + int cancelled) +{ + struct qemud_driver *driver = domain->conn->privateData; + virDomainObjPtr vm; + int ret = -1; + + virCheckFlags(VIR_MIGRATE_LIVE | + VIR_MIGRATE_PEER2PEER | + VIR_MIGRATE_TUNNELLED | + VIR_MIGRATE_PERSIST_DEST | + VIR_MIGRATE_UNDEFINE_SOURCE | + VIR_MIGRATE_PAUSED | + VIR_MIGRATE_NON_SHARED_DISK | + VIR_MIGRATE_NON_SHARED_INC, -1); + + /* Migration failed. Save the current error so nothing squashes it */
Spurious comment.
@@ -1341,6 +1382,13 @@ static int doTunnelMigrate(struct qemud_driver *driver, goto cleanup; }
+ if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, + QEMU_MIGRATION_COOKIE_GRAPHICS))) + goto cleanup; + + if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0) + VIR_WARN0("unable to provide data for graphics client relocation");
Rebase woes.
+/* This is essentially a re-impl of virDomainMigrateVersion3 + * from libvirt.c, but running in source libvirtd context, + * instead of client app context & also adding in tunnel + * handling */ +static int doPeer2PeerMigrate3(struct qemud_driver *driver, + virConnectPtr sconn, + virConnectPtr dconn, + virDomainObjPtr vm, + const char *uri, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + virDomainPtr ddomain = NULL;
+ if (flags & VIR_MIGRATE_TUNNELLED) { + /* + * Tunnelled Migrate Version 2 does not support cookies + * due to missing parameters in the prepareTunnel() API. + */ +
Too much copy-and-paste. ACK with this squashed in: diff --git i/src/qemu/qemu_driver.c w/src/qemu/qemu_driver.c index 0650d44..eb50565 100644 --- i/src/qemu/qemu_driver.c +++ w/src/qemu/qemu_driver.c @@ -6107,8 +6107,6 @@ qemuDomainMigrateConfirm3(virDomainPtr domain, VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC, -1); - /* Migration failed. Save the current error so nothing squashes it */ - qemuDriverLock(driver); vm = virDomainFindByUUID(&driver->domains, domain->uuid); if (!vm) { diff --git i/src/qemu/qemu_migration.c w/src/qemu/qemu_migration.c index ccb798f..ea30229 100644 --- i/src/qemu/qemu_migration.c +++ w/src/qemu/qemu_migration.c @@ -1387,7 +1387,7 @@ static int doTunnelMigrate(struct qemud_driver *driver, goto cleanup; if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0) - VIR_WARN0("unable to provide data for graphics client relocation"); + VIR_WARN("unable to provide data for graphics client relocation"); /* 3. start migration on source */ qemuDomainObjEnterMonitorWithDriver(driver, vm); @@ -1456,8 +1456,8 @@ static int doTunnelMigrate(struct qemud_driver *driver, if (ret == 0 && qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) - VIR_WARN0("Unable to encode migration cookie"); - + VIR_WARN("Unable to encode migration cookie"); +y cancel: if (ret != 0 && virDomainObjIsActive(vm)) { qemuDomainObjEnterMonitorWithDriver(driver, vm); @@ -1652,11 +1652,6 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, cookieout = NULL; cookieoutlen = 0; if (flags & VIR_MIGRATE_TUNNELLED) { - /* - * Tunnelled Migrate Version 2 does not support cookies - * due to missing parameters in the prepareTunnel() API. - */ - if (!(st = virStreamNew(dconn, 0))) goto cleanup; -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

On 05/11/2011 09:30 PM, Eric Blake wrote:
On 05/11/2011 03:09 AM, Daniel P. Berrange wrote: ACK with this squashed in:
if (ret == 0 && qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) - VIR_WARN0("Unable to encode migration cookie"); - + VIR_WARN("Unable to encode migration cookie"); +y cancel:
Oops, don't squash in the stray 'y'. (It's getting too late for me, obviously...). -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

The qemuMigrationWaitForCompletion method contains a loop which repeatedly queries QEMU to check migration progress, and also processes job signals (pause, setspeed, setbandwidth, cancel). The tunnelled migration loop does not currently support this functionality, but should. Refactor the code to allow it to be used with tunnelled migration. --- src/qemu/qemu_migration.c | 252 +++++++++++++++++++++++++-------------------- 1 files changed, 142 insertions(+), 110 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 6cb529f..62b6b9e 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -583,14 +583,148 @@ qemuMigrationSetOffline(struct qemud_driver *driver, } -int -qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm) +static int +qemuMigrationProcessJobSignals(struct qemud_driver *driver, + virDomainObjPtr vm, + const char *job) { + qemuDomainObjPrivatePtr priv = vm->privateData; + int ret = -1; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"), + job, _("guest unexpectedly quit")); + return -1; + } + + if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) { + priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL; + VIR_DEBUG0("Cancelling job at client request"); + qemuDomainObjEnterMonitorWithDriver(driver, vm); + ret = qemuMonitorMigrateCancel(priv->mon); + qemuDomainObjExitMonitorWithDriver(driver, vm); + if (ret < 0) { + VIR_WARN0("Unable to cancel job"); + } + } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) { + priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND; + VIR_DEBUG0("Pausing domain for non-live migration"); + if (qemuMigrationSetOffline(driver, vm) < 0) + VIR_WARN0("Unable to pause domain"); + } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) { + unsigned long long ms = priv->jobSignalsData.migrateDowntime; + + priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME; + priv->jobSignalsData.migrateDowntime = 0; + VIR_DEBUG("Setting migration downtime to %llums", ms); + qemuDomainObjEnterMonitorWithDriver(driver, vm); + ret = qemuMonitorSetMigrationDowntime(priv->mon, ms); + qemuDomainObjExitMonitorWithDriver(driver, vm); + if (ret < 0) + VIR_WARN0("Unable to set migration downtime"); + } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) { + unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth; + + priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED; + priv->jobSignalsData.migrateBandwidth = 0; + VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth); + qemuDomainObjEnterMonitorWithDriver(driver, vm); + ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth); + qemuDomainObjExitMonitorWithDriver(driver, vm); + if (ret < 0) + VIR_WARN0("Unable to set migration speed"); + } else { + ret = 0; + } + + return ret; +} + + +static int +qemuMigrationUpdateJobStatus(struct qemud_driver *driver, + virDomainObjPtr vm, + const char *job) +{ + qemuDomainObjPrivatePtr priv = vm->privateData; int ret = -1; int status; unsigned long long memProcessed; unsigned long long memRemaining; unsigned long long memTotal; + struct timeval now; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"), + job, _("guest unexpectedly quit")); + return -1; + } + + qemuDomainObjEnterMonitorWithDriver(driver, vm); + ret = qemuMonitorGetMigrationStatus(priv->mon, + &status, + &memProcessed, + &memRemaining, + &memTotal); + qemuDomainObjExitMonitorWithDriver(driver, vm); + + if (ret < 0) { + priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED; + return 
-1; + } + + if (gettimeofday(&now, NULL) < 0) { + priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED; + virReportSystemError(errno, "%s", + _("cannot get time of day")); + return -1; + } + priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart; + + switch (status) { + case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: + priv->jobInfo.type = VIR_DOMAIN_JOB_NONE; + qemuReportError(VIR_ERR_OPERATION_FAILED, + _("%s: %s"), job, _("is not active")); + break; + + case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE: + priv->jobInfo.dataTotal = memTotal; + priv->jobInfo.dataRemaining = memRemaining; + priv->jobInfo.dataProcessed = memProcessed; + + priv->jobInfo.memTotal = memTotal; + priv->jobInfo.memRemaining = memRemaining; + priv->jobInfo.memProcessed = memProcessed; + + ret = 0; + break; + + case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: + priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED; + ret = 0; + break; + + case QEMU_MONITOR_MIGRATION_STATUS_ERROR: + priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED; + qemuReportError(VIR_ERR_OPERATION_FAILED, + _("%s: %s"), job, _("unexpectedly failed")); + break; + + case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: + priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED; + qemuReportError(VIR_ERR_OPERATION_FAILED, + _("%s: %s"), job, _("canceled by client")); + break; + } + + return ret; +} + + +int +qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm) +{ qemuDomainObjPrivatePtr priv = vm->privateData; priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED; @@ -598,8 +732,6 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm) while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) { /* Poll every 50ms for progress & to allow cancellation */ struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull }; - struct timeval now; - int rc; const char *job; switch (priv->jobActive) { @@ -616,115 +748,12 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm) job = _("job"); } - - if (!virDomainObjIsActive(vm)) { - qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"), - job, _("guest unexpectedly quit")); - goto cleanup; - } - - if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) { - priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL; - VIR_DEBUG0("Cancelling job at client request"); - qemuDomainObjEnterMonitorWithDriver(driver, vm); - rc = qemuMonitorMigrateCancel(priv->mon); - qemuDomainObjExitMonitorWithDriver(driver, vm); - if (rc < 0) { - VIR_WARN0("Unable to cancel job"); - } - } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) { - priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND; - VIR_DEBUG0("Pausing domain for non-live migration"); - if (qemuMigrationSetOffline(driver, vm) < 0) - VIR_WARN0("Unable to pause domain"); - } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) { - unsigned long long ms = priv->jobSignalsData.migrateDowntime; - - priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME; - priv->jobSignalsData.migrateDowntime = 0; - VIR_DEBUG("Setting migration downtime to %llums", ms); - qemuDomainObjEnterMonitorWithDriver(driver, vm); - rc = qemuMonitorSetMigrationDowntime(priv->mon, ms); - qemuDomainObjExitMonitorWithDriver(driver, vm); - if (rc < 0) - VIR_WARN0("Unable to set migration downtime"); - } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) { - unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth; - - priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED; - priv->jobSignalsData.migrateBandwidth = 0; - VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth); - 
qemuDomainObjEnterMonitorWithDriver(driver, vm); - rc = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth); - qemuDomainObjExitMonitorWithDriver(driver, vm); - if (rc < 0) - VIR_WARN0("Unable to set migration speed"); - } - - /* Repeat check because the job signals might have caused - * guest to die - */ - if (!virDomainObjIsActive(vm)) { - qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"), - job, _("guest unexpectedly quit")); + if (qemuMigrationProcessJobSignals(driver, vm, job) < 0) goto cleanup; - } - qemuDomainObjEnterMonitorWithDriver(driver, vm); - rc = qemuMonitorGetMigrationStatus(priv->mon, - &status, - &memProcessed, - &memRemaining, - &memTotal); - qemuDomainObjExitMonitorWithDriver(driver, vm); - - if (rc < 0) { - priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED; + if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0) goto cleanup; - } - - if (gettimeofday(&now, NULL) < 0) { - priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED; - virReportSystemError(errno, "%s", - _("cannot get time of day")); - goto cleanup; - } - priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart; - - switch (status) { - case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: - priv->jobInfo.type = VIR_DOMAIN_JOB_NONE; - qemuReportError(VIR_ERR_OPERATION_FAILED, - _("%s: %s"), job, _("is not active")); - break; - case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE: - priv->jobInfo.dataTotal = memTotal; - priv->jobInfo.dataRemaining = memRemaining; - priv->jobInfo.dataProcessed = memProcessed; - - priv->jobInfo.memTotal = memTotal; - priv->jobInfo.memRemaining = memRemaining; - priv->jobInfo.memProcessed = memProcessed; - break; - - case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: - priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED; - ret = 0; - break; - - case QEMU_MONITOR_MIGRATION_STATUS_ERROR: - priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED; - qemuReportError(VIR_ERR_OPERATION_FAILED, - _("%s: %s"), job, _("unexpectedly failed")); - break; - - case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: - priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED; - qemuReportError(VIR_ERR_OPERATION_FAILED, - _("%s: %s"), job, _("canceled by client")); - break; - } virDomainObjUnlock(vm); qemuDriverUnlock(driver); @@ -736,7 +765,10 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm) } cleanup: - return ret; + if (priv->jobInfo.type == VIR_DOMAIN_JOB_COMPLETED) + return 0; + else + return -1; } -- 1.7.4.4
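[Editor's note] To make the control flow of the refactored loop easier to see without the diff markers: qemuMigrationWaitForCompletion() now polls on a 50ms interval, lets qemuMigrationProcessJobSignals() handle any pending cancel/pause/speed/downtime requests, then lets qemuMigrationUpdateJobStatus() refresh the job info, and exits once the job leaves the unbounded state. A simplified standalone sketch of that shape, with stubbed helpers in place of the real monitor calls:

    #include <stdio.h>
    #include <time.h>

    enum { JOB_UNBOUNDED, JOB_COMPLETED, JOB_FAILED };

    /* Stubs standing in for the two helpers split out by this patch; the
     * real ones talk to the QEMU monitor. Here the job "completes" after
     * a few iterations so the sketch terminates. */
    static int process_job_signals(void) { return 0; }
    static int update_job_status(int *jobtype, int tick)
    {
        if (tick >= 3)
            *jobtype = JOB_COMPLETED;
        return 0;
    }

    int main(void)
    {
        int jobtype = JOB_UNBOUNDED;
        int tick = 0;

        while (jobtype == JOB_UNBOUNDED) {
            /* Poll every 50ms for progress & to allow cancellation */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };

            if (process_job_signals() < 0)
                break;
            if (update_job_status(&jobtype, tick++) < 0)
                break;
            nanosleep(&ts, NULL);
        }

        printf("job %s\n", jobtype == JOB_COMPLETED ? "completed" : "failed");
        return jobtype == JOB_COMPLETED ? 0 : 1;
    }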

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
The qemuMigrationWaitForCompletion method contains a loop which repeatedly queries QEMU to check migration progress, and also processes job signals (pause, setspeed, setbandwidth, cancel).
The tunnelled migration loop does not currently support this functionality, but should. Refactor the code to allow it to be used with tunnelled migration.
---
 src/qemu/qemu_migration.c |  252 +++++++++++++++++++++++++--------------------
 1 files changed, 142 insertions(+), 110 deletions(-)
I was able to apply patches 1-9 without too many issues, but got a nasty merge conflict with patch 10, so I am no longer compile testing at this point. :( Don't know if you can quickly rebase and repost for a better review. Also, how does this fit in with Federico's pending patch to support blkstat/blkinfo during migration? [I still need to review his v2] https://www.redhat.com/archives/libvir-list/2011-May/msg00669.html
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 6cb529f..62b6b9e 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -583,14 +583,148 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
 }
 
-int
-qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
+static int
+qemuMigrationProcessJobSignals(struct qemud_driver *driver,
+                               virDomainObjPtr vm,
+                               const char *job)
 {
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    int ret = -1;
+
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
+                        job, _("guest unexpectedly quit"));
+        return -1;
+    }
+
+    if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
+        priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
+        VIR_DEBUG0("Cancelling job at client request");
s/VIR_DEBUG0/VIR_DEBUG/ throughout

Overall, it looks like a sane split (taking two portions out of the loop and into their own functions).

--
Eric Blake eblake@redhat.com +1-801-349-2682
Libvirt virtualization library http://libvirt.org

The doTunnelMigrate method forgot to set the bandwidth resource restriction

* src/qemu/qemu_migration.c: Set resource restriction
---
 src/qemu/qemu_migration.c |    8 ++++++--
 1 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 62b6b9e..e23b690 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1301,7 +1301,6 @@ static int doTunnelSendAll(virStreamPtr st,
         return -1;
     }
 
-    /* XXX should honour the 'resource' parameter here */
     for (;;) {
         nbytes = saferead(sock, buffer, nbytes);
         if (nbytes < 0) {
@@ -1341,7 +1340,7 @@ static int doTunnelMigrate(struct qemud_driver *driver,
                            char **cookieout,
                            int *cookieoutlen,
                            unsigned long flags,
-                           unsigned long resource ATTRIBUTE_UNUSED)
+                           unsigned long resource)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     int client_sock = -1;
@@ -1423,6 +1422,11 @@ static int doTunnelMigrate(struct qemud_driver *driver,
 
     /* 3. start migration on source */
     qemuDomainObjEnterMonitorWithDriver(driver, vm);
+    if (resource > 0 &&
+        qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
+        qemuDomainObjExitMonitorWithDriver(driver, vm);
+        goto cleanup;
+    }
 
     if (flags & VIR_MIGRATE_NON_SHARED_DISK)
         background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
--
1.7.4.4
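The point of the ordering in the hunk above is that the cap has to be in place before the transfer is started, and a failure to apply it should stop the migration attempt rather than silently run uncapped. A minimal standalone sketch of that pattern (both helpers are hypothetical stand-ins, not the qemuMonitor calls):

#include <stdio.h>

/* Pretend to apply a bandwidth cap; stand-in for the monitor's
 * migrate_set_speed step. */
static int set_transfer_limit(unsigned long resource)
{
    printf("limiting transfer to %lu\n", resource);
    return 0;
}

/* Pretend to kick off the background transfer. */
static int start_background_transfer(void)
{
    printf("transfer started\n");
    return 0;
}

static int begin_capped_transfer(unsigned long resource)
{
    /* resource == 0 means the caller asked for no limit, mirroring the
     * 'resource > 0 &&' guard in the patch */
    if (resource > 0 && set_transfer_limit(resource) < 0)
        return -1;      /* refuse to start an uncapped transfer by accident */

    return start_background_transfer();
}

int main(void)
{
    return begin_capped_transfer(32) == 0 ? 0 : 1;
}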

On 05/11/2011 03:09 AM, Daniel P. Berrange wrote:
The doTunnelMigrate method forgot to set the bandwidth resource restriction
* src/qemu/qemu_migration.c: Set resource restriction
---
 src/qemu/qemu_migration.c |    8 ++++++--
 1 files changed, 6 insertions(+), 2 deletions(-)
ACK. This one can be floated up before 10/16 if you want (that is, I was able to compile-test it).

And with that, I'll have to pause my review until tomorrow.

--
Eric Blake eblake@redhat.com +1-801-349-2682
Libvirt virtualization library http://libvirt.org

The 'nbytes' variable was not re-initialized to the buffer size on each iteration of the tunnelled migration loop. While saferead() will ensure a full read, except on EOF, it is clearer to use the real buffer size

* src/qemu/qemu_migration.c: Always read full buffer of data
---
 src/qemu/qemu_migration.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index e23b690..a39ebcf 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1302,7 +1302,7 @@ static int doTunnelSendAll(virStreamPtr st,
     }
 
     for (;;) {
-        nbytes = saferead(sock, buffer, nbytes);
+        nbytes = saferead(sock, buffer, TUNNEL_SEND_BUF_SIZE);
         if (nbytes < 0) {
             virReportSystemError(errno, "%s",
                                  _("tunnelled migration failed to read from qemu"));
--
1.7.4.4
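With saferead() the old code happened to behave, because saferead() only returns short at EOF, but with a plain read() loop reusing the previous return value as the next request size would be a real bug: one short read would shrink every read after it. A standalone POSIX sketch of the loop, always offering the full buffer (ordinary read()/write() here, not saferead()/virStreamSend()):

#include <errno.h>
#include <unistd.h>

#define BUF_SIZE 65536

/* Copy everything from infd to outfd; the request size passed to read()
 * is always the full buffer, never the previous byte count. */
static int pump_fd(int infd, int outfd, char *buffer)
{
    for (;;) {
        ssize_t nbytes = read(infd, buffer, BUF_SIZE);

        if (nbytes < 0) {
            if (errno == EINTR)
                continue;
            return -1;
        }
        if (nbytes == 0)            /* EOF; get out of here */
            return 0;

        /* write() may be short as well, so flush the whole chunk */
        for (ssize_t done = 0; done < nbytes; ) {
            ssize_t w = write(outfd, buffer + done, (size_t)(nbytes - done));
            if (w < 0) {
                if (errno == EINTR)
                    continue;
                return -1;
            }
            done += w;
        }
    }
}

int main(void)
{
    static char buffer[BUF_SIZE];
    return pump_fd(STDIN_FILENO, STDOUT_FILENO, buffer) == 0 ? 0 : 1;
}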

Cancelling the QEMU migration may cause QEMU to flush pending data on the migration socket. This may in turn block QEMU if nothing reads from the other end of the socket. Closing the socket before cancelling QEMU migration avoids this possible deadlock.

* src/qemu/qemu_migration.c: Close sockets before cancelling migration on failure
---
 src/qemu/qemu_migration.c |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index a39ebcf..1c86373 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1496,6 +1496,8 @@ static int doTunnelMigrate(struct qemud_driver *driver,
 
 cancel:
     if (ret != 0 && virDomainObjIsActive(vm)) {
+        VIR_FORCE_CLOSE(client_sock);
+        VIR_FORCE_CLOSE(qemu_sock);
         qemuDomainObjEnterMonitorWithDriver(driver, vm);
         qemuMonitorMigrateCancel(priv->mon);
         qemuDomainObjExitMonitorWithDriver(driver, vm);
--
1.7.4.4
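The deadlock being avoided follows from the description above: once the sending loop has failed, nothing drains the migration socket any more, so a cancel that makes QEMU flush pending data can block QEMU, and with it the very cancel command being waited on. Closing the local ends first turns that flush into an immediate error. A small standalone sketch of the ordering (request_producer_cancel() is a hypothetical stand-in for issuing migrate_cancel via the monitor):

#include <unistd.h>

/* Stand-in for telling the producer to abort; in the patch this is the
 * qemuMonitorMigrateCancel() call, which can make the producer flush
 * whatever it still has buffered. */
static int request_producer_cancel(void)
{
    return 0;
}

static void abort_transfer(int *client_sock, int *qemu_sock)
{
    /* Close the descriptors we were draining first: if the producer reacts
     * to the cancel by flushing pending data, that write fails straight
     * away instead of blocking on a socket nobody reads. */
    if (*client_sock >= 0) {
        close(*client_sock);
        *client_sock = -1;
    }
    if (*qemu_sock >= 0) {
        close(*qemu_sock);
        *qemu_sock = -1;
    }

    /* Only now ask the producer to cancel. */
    (void)request_producer_cancel();
}

int main(void)
{
    int fds[2];

    if (pipe(fds) < 0)
        return 1;
    abort_transfer(&fds[0], &fds[1]);
    return 0;
}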

virStreamSend already sets an error message, so don't overwrite it

* src/qemu/qemu_migration.c: Remove bogus error report
---
 src/qemu/qemu_migration.c |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 1c86373..b8e595e 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1315,8 +1315,6 @@ static int doTunnelSendAll(virStreamPtr st,
             break;
 
         if (virStreamSend(st, buffer, nbytes) < 0) {
-            qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
-                            _("Failed to write migration data to remote libvirtd"));
             VIR_FREE(buffer);
             return -1;
         }
--
1.7.4.4

By running the doTunnelSendAll code in a separate thread, the main thread can do qemuMigrationWaitForCompletion as with normal migration. This in turn ensures that job signals work correctly and that progress monitoring can be done.

* src/qemu/qemu_migration.c: Run tunnelled migration in separate thread
---
 src/qemu/qemu_migration.c |   95 ++++++++++++++++++++++++++++++++++++++-------
 1 files changed, 81 insertions(+), 14 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index b8e595e..5413186 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1289,44 +1289,101 @@ cleanup:
 
 #define TUNNEL_SEND_BUF_SIZE 65536
 
-static int doTunnelSendAll(virStreamPtr st,
-                           int sock)
+typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
+typedef qemuMigrationIOThread * qemuMigrationIOThreadPtr;
+struct _qemuMigrationIOThread {
+    virThread thread;
+    virStreamPtr st;
+    int sock;
+    virError err;
+};
+
+static void qemuMigrationIOFunc(void *arg)
 {
+    qemuMigrationIOThreadPtr data = arg;
     char *buffer;
     int nbytes = TUNNEL_SEND_BUF_SIZE;
 
     if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0) {
         virReportOOMError();
-        virStreamAbort(st);
-        return -1;
+        virStreamAbort(data->st);
+        goto error;
     }
 
     for (;;) {
-        nbytes = saferead(sock, buffer, TUNNEL_SEND_BUF_SIZE);
+        nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
         if (nbytes < 0) {
             virReportSystemError(errno, "%s",
                                  _("tunnelled migration failed to read from qemu"));
-            virStreamAbort(st);
+            virStreamAbort(data->st);
             VIR_FREE(buffer);
-            return -1;
+            goto error;
         } else if (nbytes == 0)
             /* EOF; get out of here */
             break;
 
-        if (virStreamSend(st, buffer, nbytes) < 0) {
+        if (virStreamSend(data->st, buffer, nbytes) < 0) {
             VIR_FREE(buffer);
-            return -1;
+            goto error;
         }
     }
 
     VIR_FREE(buffer);
 
-    if (virStreamFinish(st) < 0)
-        /* virStreamFinish set the error for us */
-        return -1;
+    if (virStreamFinish(data->st) < 0)
+        goto error;
 
-    return 0;
+    return;
+
+error:
+    virCopyLastError(&data->err);
+    virResetLastError();
+}
+
+
+static qemuMigrationIOThreadPtr
+qemuMigrationStartTunnel(virStreamPtr st,
+                         int sock)
+{
+    qemuMigrationIOThreadPtr io;
+
+    if (VIR_ALLOC(io) < 0) {
+        virReportOOMError();
+        return NULL;
+    }
+
+    io->st = st;
+    io->sock = sock;
+
+    if (virThreadCreate(&io->thread, true,
+                        qemuMigrationIOFunc,
+                        io) < 0) {
+        VIR_FREE(io);
+        return NULL;
+    }
+
+    return io;
+}
+
+static int
+qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io)
+{
+    int rv = -1;
+    virThreadJoin(&io->thread);
+
+    /* Forward error from the IO thread, to this thread */
+    if (io->err.code != VIR_ERR_OK) {
+        virSetError(&io->err);
+        virResetError(&io->err);
+        goto cleanup;
+    }
+
+    rv = 0;
+
+cleanup:
+    VIR_FREE(io);
+    return rv;
 }
 
 
@@ -1351,6 +1408,7 @@ static int doTunnelMigrate(struct qemud_driver *driver,
     unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
     int ret = -1;
     qemuMigrationCookiePtr mig = NULL;
+    qemuMigrationIOThreadPtr iothread = NULL;
 
     if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
         !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
@@ -1486,7 +1544,16 @@ static int doTunnelMigrate(struct qemud_driver *driver,
         goto cancel;
     }
 
-    ret = doTunnelSendAll(st, client_sock);
+    if (!(iothread = qemuMigrationStartTunnel(st, client_sock)))
+        goto cancel;
+
+    ret = qemuMigrationWaitForCompletion(driver, vm);
+
+    /* Close now to ensure the IO thread quits & is joinable in next method */
+    VIR_FORCE_CLOSE(client_sock);
+
+    if (qemuMigrationStopTunnel(iothread) < 0)
+        ret = -1;
 
     if (ret == 0 &&
         qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
--
1.7.4.4
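The core of the change is a generic pattern: a dedicated thread pumps bytes from a local descriptor into the stream, it is started before the wait loop and joined afterwards, and any error it hits travels back through the shared struct at join time. A standalone sketch of the same pattern with POSIX primitives (pthreads and raw fds rather than virThread/virStream; all names here are illustrative):

/* build with: cc -pthread ... */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

#define TUNNEL_BUF_SIZE 65536

struct io_thread {
    pthread_t thread;
    int infd;
    int outfd;
    int err;        /* errno captured by the worker, 0 on success */
};

static void *io_func(void *arg)
{
    struct io_thread *io = arg;
    char buffer[TUNNEL_BUF_SIZE];

    for (;;) {
        ssize_t n = read(io->infd, buffer, sizeof(buffer));
        if (n < 0) {
            io->err = errno;
            return NULL;
        }
        if (n == 0)                 /* EOF: the source is finished */
            return NULL;
        for (ssize_t off = 0; off < n; ) {
            ssize_t w = write(io->outfd, buffer + off, (size_t)(n - off));
            if (w < 0) {
                io->err = errno;
                return NULL;
            }
            off += w;
        }
    }
}

/* Analogue of qemuMigrationStartTunnel: allocate state and spawn the pump */
static struct io_thread *start_tunnel(int infd, int outfd)
{
    struct io_thread *io = calloc(1, sizeof(*io));

    if (!io)
        return NULL;
    io->infd = infd;
    io->outfd = outfd;
    if (pthread_create(&io->thread, NULL, io_func, io) != 0) {
        free(io);
        return NULL;
    }
    return io;
}

/* Analogue of qemuMigrationStopTunnel: join and forward the worker's error */
static int stop_tunnel(struct io_thread *io)
{
    int rv;

    pthread_join(io->thread, NULL);
    rv = io->err == 0 ? 0 : -1;
    free(io);
    return rv;
}

int main(void)
{
    struct io_thread *io = start_tunnel(STDIN_FILENO, STDOUT_FILENO);

    if (!io)
        return 1;
    /* In the patch, the main thread sits in qemuMigrationWaitForCompletion
     * here before joining the pump thread. */
    return stop_tunnel(io) == 0 ? 0 : 1;
}

The join-and-forward step is the same move that lets qemuMigrationStopTunnel surface the IO thread's virError back in the main thread.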

When failing to marshall an XDR message, include the full program/version/status/proc/type info, to allow easier debugging & diagnosis of the problem.

* src/remote/remote_driver.c: Improve error when marshalling fails
---
 src/remote/remote_driver.c |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c
index e113e39..9afa180 100644
--- a/src/remote/remote_driver.c
+++ b/src/remote/remote_driver.c
@@ -5365,7 +5365,9 @@ prepareCall(struct private_data *priv,
     }
 
     if (!(*args_filter) (&xdr, args)) {
-        remoteError(VIR_ERR_RPC, "%s", _("marshalling args"));
+        remoteError(VIR_ERR_RPC,
+                    _("Unable to marshall arguments for program %d version %d procedure %d type %d status %d"),
+                    hdr.prog, hdr.vers, hdr.proc, hdr.type, hdr.status);
         goto error;
     }
 
@@ -5821,7 +5823,9 @@ processCallDispatchReply(virConnectPtr conn ATTRIBUTE_UNUSED,
     switch (hdr->status) {
     case REMOTE_OK:
         if (!(*thecall->ret_filter) (xdr, thecall->ret)) {
-            remoteError(VIR_ERR_RPC, "%s", _("unmarshalling ret"));
+            remoteError(VIR_ERR_RPC,
+                        _("Unable to marshall reply for program %d version %d procedure %d type %d status %d"),
+                        hdr->prog, hdr->vers, hdr->proc, hdr->type, hdr->status);
             return -1;
         }
         thecall->mode = REMOTE_MODE_COMPLETE;
@@ -5830,7 +5834,9 @@ processCallDispatchReply(virConnectPtr conn ATTRIBUTE_UNUSED,
     case REMOTE_ERROR:
         memset (&thecall->err, 0, sizeof thecall->err);
         if (!xdr_remote_error (xdr, &thecall->err)) {
-            remoteError(VIR_ERR_RPC, "%s", _("unmarshalling remote_error"));
+            remoteError(VIR_ERR_RPC,
+                        _("Unable to marshall error for program %d version %d procedure %d type %d status %d"),
+                        hdr->prog, hdr->vers, hdr->proc, hdr->type, hdr->status);
             return -1;
         }
         thecall->mode = REMOTE_MODE_ERROR;
--
1.7.4.4
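The gain is purely in diagnosability: instead of a bare "marshalling args" the error now pins down exactly which RPC failed to encode. A tiny standalone snippet shows the idea (the struct and the sample values are illustrative, not libvirt's remote_message_header):

#include <stdio.h>

struct rpc_header {
    int prog;
    int vers;
    int proc;
    int type;
    int status;
};

/* Format a message that identifies the failing call, in the spirit of the
 * improved remoteError() strings above. */
static void format_marshal_error(const struct rpc_header *hdr,
                                 char *buf, size_t buflen)
{
    snprintf(buf, buflen,
             "Unable to marshall arguments for program %d version %d "
             "procedure %d type %d status %d",
             hdr->prog, hdr->vers, hdr->proc, hdr->type, hdr->status);
}

int main(void)
{
    struct rpc_header hdr = { 1801807216, 1, 42, 0, 0 };   /* sample values */
    char msg[256];

    format_marshal_error(&hdr, msg, sizeof(msg));
    puts(msg);
    return 0;
}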