[libvirt] [PATCH] qemu: Transfer inactive XML among cookie

If a domain has inactive XML we want to transfer it to destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as least as possible, a optional cookie was chosen. --- src/qemu/qemu_migration.c | 91 ++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 82 insertions(+), 9 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index d9f8d93..bc98708 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -64,6 +64,7 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST, enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, + QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -71,11 +72,12 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate"); + "graphics", "lockstate", "persistent"); enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), + QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), }; typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -110,6 +112,9 @@ struct _qemuMigrationCookie { /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */ qemuMigrationCookieGraphicsPtr graphics; + + /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */ + virDomainDefPtr persistent; }; static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap) @@ -334,6 +339,26 @@ qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig, } +static int +qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig, + virDomainObjPtr dom) +{ + if (mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Migration persistent data already present")); + 
return -1; + } + + if (!dom->newDef) + return 0; + + mig->persistent = dom->newDef; + mig->flags |= QEMU_MIGRATION_COOKIE_PERSISTENT; + mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_PERSISTENT; + return 0; +} + + static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf, qemuMigrationCookieGraphicsPtr grap) @@ -354,10 +379,12 @@ static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf, static void qemuMigrationCookieXMLFormat(virBufferPtr buf, - qemuMigrationCookiePtr mig) + qemuMigrationCookiePtr mig, + struct qemud_driver *driver) { char uuidstr[VIR_UUID_STRING_BUFLEN]; char hostuuidstr[VIR_UUID_STRING_BUFLEN]; + char *domXML; int i; virUUIDFormat(mig->uuid, uuidstr); @@ -388,15 +415,25 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf, virBufferAddLit(buf, " </lockstate>\n"); } + if ((mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) && + mig->persistent) { + domXML = qemuDomainDefFormatXML(driver, mig->persistent, + VIR_DOMAIN_XML_INACTIVE | + VIR_DOMAIN_XML_SECURE); + virBufferAdd(buf, domXML, -1); + VIR_FREE(domXML); + } + virBufferAddLit(buf, "</qemu-migration>\n"); } -static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig) +static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig, + struct qemud_driver *driver) { virBuffer buf = VIR_BUFFER_INITIALIZER; - qemuMigrationCookieXMLFormat(&buf, mig); + qemuMigrationCookieXMLFormat(&buf, mig, driver); if (virBufferError(&buf)) { virReportOOMError(); @@ -460,6 +497,8 @@ error: static int qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, + struct qemud_driver *driver, + xmlDocPtr doc, xmlXPathContextPtr ctxt, unsigned int flags) { @@ -583,6 +622,25 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(mig->lockState); } + if ((flags & QEMU_MIGRATION_COOKIE_PERSISTENT) && + virXPathBoolean("count(./domain) > 0", ctxt)) { + if ((n = virXPathNodeSet("./domain", ctxt, &nodes)) > 1) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Too many domain 
elements in " + "migration cookie: %d"), + n); + goto error; + } + mig->persistent = virDomainDefParseNode(driver->caps, doc, nodes[0], + -1, VIR_DOMAIN_XML_INACTIVE); + if (!mig->persistent) { + /* virDomainDefParseNode already reported + * an error for us */ + goto error; + } + VIR_FREE(nodes); + } + return 0; error: @@ -594,6 +652,7 @@ error: static int qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, + struct qemud_driver *driver, const char *xml, unsigned int flags) { @@ -606,7 +665,7 @@ qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, if (!(doc = virXMLParseStringCtxt(xml, _("(qemu_migration_cookie)"), &ctxt))) goto cleanup; - ret = qemuMigrationCookieXMLParse(mig, ctxt, flags); + ret = qemuMigrationCookieXMLParse(mig, driver, doc, ctxt, flags); cleanup: xmlXPathFreeContext(ctxt); @@ -637,7 +696,11 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddLockstate(mig, driver, dom) < 0) return -1; - if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig))) + if (flags & QEMU_MIGRATION_COOKIE_PERSISTENT && + qemuMigrationCookieAddPersistent(mig, dom) < 0) + return -1; + + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig, driver))) return -1; *cookieoutlen = strlen(*cookieout) + 1; @@ -672,6 +735,7 @@ qemuMigrationEatCookie(struct qemud_driver *driver, if (cookiein && cookieinlen && qemuMigrationCookieXMLParseStr(mig, + driver, cookiein, flags) < 0) goto error; @@ -1575,7 +1639,8 @@ cleanup: } if (ret == 0 && - qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) + qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_PERSISTENT ) < 0) VIR_WARN("Unable to encode migration cookie"); qemuMigrationCookieFree(mig); @@ -2477,6 +2542,7 @@ qemuMigrationFinish(struct qemud_driver *driver, int newVM = 1; qemuMigrationCookiePtr mig = NULL; virErrorPtr orig_err = NULL; + int cookie_flags = 0; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " 
"cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -2490,7 +2556,11 @@ qemuMigrationFinish(struct qemud_driver *driver, v3proto ? QEMU_MIGRATION_PHASE_FINISH3 : QEMU_MIGRATION_PHASE_FINISH2); - if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) + if (flags & VIR_MIGRATE_PERSIST_DEST) + cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT; + + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, + cookieinlen, cookie_flags))) goto endjob; /* Did the migration go as planned? If yes, return the domain @@ -2510,7 +2580,10 @@ qemuMigrationFinish(struct qemud_driver *driver, if (vm->persistent) newVM = 0; vm->persistent = 1; - vmdef = virDomainObjGetPersistentDef(driver->caps, vm); + if (mig->persistent) + vm->newDef = vmdef = mig->persistent; + else + vmdef = virDomainObjGetPersistentDef(driver->caps, vm); if (virDomainSaveConfig(driver->configDir, vmdef) < 0) { /* Hmpf. Migration was successful, but making it persistent * was not. If we report successful, then when this domain -- 1.7.3.4

On 09/15/2011 08:04 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- src/qemu/qemu_migration.c | 91 ++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 82 insertions(+), 9 deletions(-)
I haven't looked at the patch closely, but I think you are on the right track. At the high level, though, I'm wondering if our cookie is large enough - what limits does our RPC protocol already provide on the size of domain xml going over the wire? We're basically doubling that size, by sending both a full domain xml and a cookie containing another domain xml. For example, if the current RPC limit is 64k of data on one transaction, and we have a 33k domain xml, then we will fail to migrate unless we also bump RPC limits. But since I don't know the current limits on RPC sizes, I may be worrying about nothing. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

On 15.09.2011 16:16, Eric Blake wrote:
On 09/15/2011 08:04 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- src/qemu/qemu_migration.c | 91 ++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 82 insertions(+), 9 deletions(-)
I haven't looked at the patch closely, but I think you are on the right track. At the high level, though, I'm wondering if our cookie is large enough - what limits does our RPC protocol already provide on the size of domain xml going over the wire? We're basically doubling that size, by sending both a full domain xml and a cookie containing another domain xml. For example, if the current RPC limit is 64k of data on one transaction, and we have a 33k domain xml, then we will fail to migrate unless we also bump RPC limits. But since I don't know the current limits on RPC sizes, I may be worrying about nothing.
From src/rpc/virnetprotocol.x:
/* Maximum total message size (serialised). */ const VIR_NET_MESSAGE_MAX = 262144; /* Size of struct virNetMessageHeader (serialised)*/ const VIR_NET_MESSAGE_HEADER_MAX = 24; /* Size of message payload */ const VIR_NET_MESSAGE_PAYLOAD_MAX = 262120; /* Size of message length field. Not counted in VIR_NET_MESSAGE_MAX */ const VIR_NET_MESSAGE_LEN_MAX = 4; /* Length of long, but not unbounded, strings. * This is an arbitrary limit designed to stop the decoder from trying * to allocate unbounded amounts of memory when fed with a bad message. */ const VIR_NET_MESSAGE_STRING_MAX = 65536; So the max size of a string is 65K. BUT, because we don't send 2 XMLs in one RPC I don't think we are going to hit that limit soon. My patch sends the domain XML along with the cookie, which is small, not 2 XMLs at the same time. Right now, at the finish phase - which is the place where I send the domain xml - the size of the cookie is 222B + sizeof(domain name). Therefore I think there is no need to increase the bound for strings. Michal

On 09/15/2011 08:38 AM, Michal Privoznik wrote:
So the max size of a string is 65K. BUT, because we don't send 2 XMLs in one RPC I don't think we are going to hit that limit soon. My patch sends the domain XML along with the cookie, which is small, not 2 XMLs at the same time. Right now, at the finish phase - which is the place where I send the domain xml - the size of the cookie is 222B + sizeof(domain name).
Therefore I think there is no need to increase the bound for string.
Thanks; that answered my concerns. By sending the original xml between begin/prepare, but the persistent xml in the cookie between perform/finish, you've split the work, so that neither send is inordinately large. No problem then, and I was over-reacting :) -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

On 09/15/2011 08:04 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- src/qemu/qemu_migration.c | 91 ++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 82 insertions(+), 9 deletions(-)
Now I've actually looked in depth at the code.
@@ -388,15 +415,25 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf, virBufferAddLit(buf, "</lockstate>\n"); }
+ if ((mig->flags& QEMU_MIGRATION_COOKIE_PERSISTENT)&& + mig->persistent) { + domXML = qemuDomainDefFormatXML(driver, mig->persistent, + VIR_DOMAIN_XML_INACTIVE | + VIR_DOMAIN_XML_SECURE);
I'm wondering if this should use virDomainDefParseString instead of qemuDomainDefFormatXML; after all, the latter only acts on VIR_DOMAIN_XML_UPDATE_CPU (which we aren't passing), while the former doesn't need a driver argument. On the other hand, it's the same thing we used in qemu_driver.c:qemuDomainSaveImageDefineXML, so we have precedent. I guess it's up to you whether to refactor and post v2, or to commit this as is and save that sort of cleanup for a separate patch.
+ virBufferAdd(buf, domXML, -1); + VIR_FREE(domXML); + } + virBufferAddLit(buf, "</qemu-migration>\n");
Oh cool - another place besides my snapshot work where making domain XML formatting auto-indented would produce nicer-looking output. Not a show-stopper for this patch, but I'll have to remember it when I refactor domain_conf.c to pass indentation parameters around. I don't know if you want to do a v2 that avoids adding the driver argument in that many places, but if not, then I didn't see anything wrong with this code. I can live with ACK for this patch as-is, although if you decide to refactor the domain formatting, then it's probably worth posting a v2 for review. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

If a domain has inactive XML we want to transfer it to destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as least as possible, a optional cookie was chosen. --- diff to v1: -substitute passing driver with cappabilities -s/qemuDomainDefFormatXML/virDomainDefParseString/ src/qemu/qemu_migration.c | 92 ++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 83 insertions(+), 9 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index d9f8d93..0b53141 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -64,6 +64,7 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST, enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, + QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -71,11 +72,12 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate"); + "graphics", "lockstate", "persistent"); enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), + QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), }; typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -110,6 +112,9 @@ struct _qemuMigrationCookie { /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */ qemuMigrationCookieGraphicsPtr graphics; + + /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */ + virDomainDefPtr persistent; }; static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap) @@ -334,6 +339,26 @@ qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig, } +static int +qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig, + virDomainObjPtr dom) +{ + if (mig->flags & 
QEMU_MIGRATION_COOKIE_PERSISTENT) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Migration persistent data already present")); + return -1; + } + + if (!dom->newDef) + return 0; + + mig->persistent = dom->newDef; + mig->flags |= QEMU_MIGRATION_COOKIE_PERSISTENT; + mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_PERSISTENT; + return 0; +} + + static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf, qemuMigrationCookieGraphicsPtr grap) @@ -354,10 +379,12 @@ static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf, static void qemuMigrationCookieXMLFormat(virBufferPtr buf, - qemuMigrationCookiePtr mig) + qemuMigrationCookiePtr mig, + virCapsPtr caps) { char uuidstr[VIR_UUID_STRING_BUFLEN]; char hostuuidstr[VIR_UUID_STRING_BUFLEN]; + char *domXML; int i; virUUIDFormat(mig->uuid, uuidstr); @@ -388,15 +415,26 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf, virBufferAddLit(buf, " </lockstate>\n"); } + if ((mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) && + mig->persistent) { + domXML = virDomainDefParseString(caps, mig->persistent, + QEMU_EXPECTED_VIRT_TYPES, + VIR_DOMAIN_XML_INACTIVE | + VIR_DOMAIN_XML_SECURE); + virBufferAdd(buf, domXML, -1); + VIR_FREE(domXML); + } + virBufferAddLit(buf, "</qemu-migration>\n"); } -static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig) +static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig, + virCapsPtr caps) { virBuffer buf = VIR_BUFFER_INITIALIZER; - qemuMigrationCookieXMLFormat(&buf, mig); + qemuMigrationCookieXMLFormat(&buf, mig, caps); if (virBufferError(&buf)) { virReportOOMError(); @@ -460,6 +498,8 @@ error: static int qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, + virCapsPtr caps, + xmlDocPtr doc, xmlXPathContextPtr ctxt, unsigned int flags) { @@ -583,6 +623,25 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(mig->lockState); } + if ((flags & QEMU_MIGRATION_COOKIE_PERSISTENT) && + virXPathBoolean("count(./domain) > 0", ctxt)) 
{ + if ((n = virXPathNodeSet("./domain", ctxt, &nodes)) > 1) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Too many domain elements in " + "migration cookie: %d"), + n); + goto error; + } + mig->persistent = virDomainDefParseNode(caps, doc, nodes[0], -1, + VIR_DOMAIN_XML_INACTIVE); + if (!mig->persistent) { + /* virDomainDefParseNode already reported + * an error for us */ + goto error; + } + VIR_FREE(nodes); + } + return 0; error: @@ -594,6 +653,7 @@ error: static int qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, + virCapsPtr caps, const char *xml, unsigned int flags) { @@ -606,7 +666,7 @@ qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, if (!(doc = virXMLParseStringCtxt(xml, _("(qemu_migration_cookie)"), &ctxt))) goto cleanup; - ret = qemuMigrationCookieXMLParse(mig, ctxt, flags); + ret = qemuMigrationCookieXMLParse(mig, caps, doc, ctxt, flags); cleanup: xmlXPathFreeContext(ctxt); @@ -637,7 +697,11 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddLockstate(mig, driver, dom) < 0) return -1; - if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig))) + if (flags & QEMU_MIGRATION_COOKIE_PERSISTENT && + qemuMigrationCookieAddPersistent(mig, dom) < 0) + return -1; + + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig, driver->caps))) return -1; *cookieoutlen = strlen(*cookieout) + 1; @@ -672,6 +736,7 @@ qemuMigrationEatCookie(struct qemud_driver *driver, if (cookiein && cookieinlen && qemuMigrationCookieXMLParseStr(mig, + driver->caps, cookiein, flags) < 0) goto error; @@ -1575,7 +1640,8 @@ cleanup: } if (ret == 0 && - qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) + qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_PERSISTENT ) < 0) VIR_WARN("Unable to encode migration cookie"); qemuMigrationCookieFree(mig); @@ -2477,6 +2543,7 @@ qemuMigrationFinish(struct qemud_driver *driver, int newVM = 1; qemuMigrationCookiePtr mig = NULL; virErrorPtr 
orig_err = NULL; + int cookie_flags = 0; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -2490,7 +2557,11 @@ qemuMigrationFinish(struct qemud_driver *driver, v3proto ? QEMU_MIGRATION_PHASE_FINISH3 : QEMU_MIGRATION_PHASE_FINISH2); - if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) + if (flags & VIR_MIGRATE_PERSIST_DEST) + cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT; + + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, + cookieinlen, cookie_flags))) goto endjob; /* Did the migration go as planned? If yes, return the domain @@ -2510,7 +2581,10 @@ qemuMigrationFinish(struct qemud_driver *driver, if (vm->persistent) newVM = 0; vm->persistent = 1; - vmdef = virDomainObjGetPersistentDef(driver->caps, vm); + if (mig->persistent) + vm->newDef = vmdef = mig->persistent; + else + vmdef = virDomainObjGetPersistentDef(driver->caps, vm); if (virDomainSaveConfig(driver->configDir, vmdef) < 0) { /* Hmpf. Migration was successful, but making it persistent * was not. If we report successful, then when this domain -- 1.7.3.4

If a domain has inactive XML we want to transfer it to destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as least as possible, a optional cookie was chosen. --- diff to v2: -Fix substitution on wrong place src/qemu/qemu_migration.c | 81 ++++++++++++++++++++++++++++++++++++++++++--- 1 files changed, 76 insertions(+), 5 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index d9f8d93..8ea8508 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -64,6 +64,7 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST, enum qemuMigrationCookieFlags { QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS, QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE, + QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT, QEMU_MIGRATION_COOKIE_FLAG_LAST }; @@ -71,11 +72,12 @@ enum qemuMigrationCookieFlags { VIR_ENUM_DECL(qemuMigrationCookieFlag); VIR_ENUM_IMPL(qemuMigrationCookieFlag, QEMU_MIGRATION_COOKIE_FLAG_LAST, - "graphics", "lockstate"); + "graphics", "lockstate", "persistent"); enum qemuMigrationCookieFeatures { QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS), QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE), + QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT), }; typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics; @@ -110,6 +112,9 @@ struct _qemuMigrationCookie { /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */ qemuMigrationCookieGraphicsPtr graphics; + + /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */ + virDomainDefPtr persistent; }; static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap) @@ -334,6 +339,26 @@ qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig, } +static int +qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig, + virDomainObjPtr dom) +{ + if (mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + 
_("Migration persistent data already present")); + return -1; + } + + if (!dom->newDef) + return 0; + + mig->persistent = dom->newDef; + mig->flags |= QEMU_MIGRATION_COOKIE_PERSISTENT; + mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_PERSISTENT; + return 0; +} + + static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf, qemuMigrationCookieGraphicsPtr grap) @@ -358,6 +383,7 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf, { char uuidstr[VIR_UUID_STRING_BUFLEN]; char hostuuidstr[VIR_UUID_STRING_BUFLEN]; + char *domXML; int i; virUUIDFormat(mig->uuid, uuidstr); @@ -388,6 +414,15 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf, virBufferAddLit(buf, " </lockstate>\n"); } + if ((mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) && + mig->persistent) { + domXML = virDomainDefFormat(mig->persistent, + VIR_DOMAIN_XML_INACTIVE | + VIR_DOMAIN_XML_SECURE); + virBufferAdd(buf, domXML, -1); + VIR_FREE(domXML); + } + virBufferAddLit(buf, "</qemu-migration>\n"); } @@ -460,6 +495,8 @@ error: static int qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, + struct qemud_driver *driver, + xmlDocPtr doc, xmlXPathContextPtr ctxt, unsigned int flags) { @@ -583,6 +620,25 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig, VIR_FREE(mig->lockState); } + if ((flags & QEMU_MIGRATION_COOKIE_PERSISTENT) && + virXPathBoolean("count(./domain) > 0", ctxt)) { + if ((n = virXPathNodeSet("./domain", ctxt, &nodes)) > 1) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("Too many domain elements in " + "migration cookie: %d"), + n); + goto error; + } + mig->persistent = virDomainDefParseNode(driver->caps, doc, nodes[0], + -1, VIR_DOMAIN_XML_INACTIVE); + if (!mig->persistent) { + /* virDomainDefParseNode already reported + * an error for us */ + goto error; + } + VIR_FREE(nodes); + } + return 0; error: @@ -594,6 +650,7 @@ error: static int qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, + struct qemud_driver *driver, const char *xml, unsigned int flags) 
{ @@ -606,7 +663,7 @@ qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig, if (!(doc = virXMLParseStringCtxt(xml, _("(qemu_migration_cookie)"), &ctxt))) goto cleanup; - ret = qemuMigrationCookieXMLParse(mig, ctxt, flags); + ret = qemuMigrationCookieXMLParse(mig, driver, doc, ctxt, flags); cleanup: xmlXPathFreeContext(ctxt); @@ -637,6 +694,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig, qemuMigrationCookieAddLockstate(mig, driver, dom) < 0) return -1; + if (flags & QEMU_MIGRATION_COOKIE_PERSISTENT && + qemuMigrationCookieAddPersistent(mig, dom) < 0) + return -1; + if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig))) return -1; @@ -672,6 +733,7 @@ qemuMigrationEatCookie(struct qemud_driver *driver, if (cookiein && cookieinlen && qemuMigrationCookieXMLParseStr(mig, + driver, cookiein, flags) < 0) goto error; @@ -1575,7 +1637,8 @@ cleanup: } if (ret == 0 && - qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0) + qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_PERSISTENT ) < 0) VIR_WARN("Unable to encode migration cookie"); qemuMigrationCookieFree(mig); @@ -2477,6 +2540,7 @@ qemuMigrationFinish(struct qemud_driver *driver, int newVM = 1; qemuMigrationCookiePtr mig = NULL; virErrorPtr orig_err = NULL; + int cookie_flags = 0; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -2490,7 +2554,11 @@ qemuMigrationFinish(struct qemud_driver *driver, v3proto ? QEMU_MIGRATION_PHASE_FINISH3 : QEMU_MIGRATION_PHASE_FINISH2); - if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) + if (flags & VIR_MIGRATE_PERSIST_DEST) + cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT; + + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, + cookieinlen, cookie_flags))) goto endjob; /* Did the migration go as planned? 
If yes, return the domain @@ -2510,7 +2578,10 @@ qemuMigrationFinish(struct qemud_driver *driver, if (vm->persistent) newVM = 0; vm->persistent = 1; - vmdef = virDomainObjGetPersistentDef(driver->caps, vm); + if (mig->persistent) + vm->newDef = vmdef = mig->persistent; + else + vmdef = virDomainObjGetPersistentDef(driver->caps, vm); if (virDomainSaveConfig(driver->configDir, vmdef) < 0) { /* Hmpf. Migration was successful, but making it persistent * was not. If we report successful, then when this domain -- 1.7.3.4

On 09/16/2011 03:51 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- diff to v2: -Fix substitution in the wrong place src/qemu/qemu_migration.c | 81 ++++++++++++++++++++++++++++++++++++++++++--- 1 files changed, 76 insertions(+), 5 deletions(-)
Looks right to me, but I'd feel more comfortable with a second ACK from danpb. I'm also hesitant on whether we should apply this now, or wait to post-0.9.5. While it is fixing a design flaw, it is not a regression fix, and feels more like a feature addition - if Dan could run this through his TCK suite to ensure no migration regressions, I'd feel a bit better about taking it now. -- Eric Blake eblake@redhat.com +1-801-349-2682 Libvirt virtualization library http://libvirt.org

On Fri, Sep 16, 2011 at 08:35:15AM -0600, Eric Blake wrote:
On 09/16/2011 03:51 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- diff to v2: -Fix substitution in the wrong place src/qemu/qemu_migration.c | 81 ++++++++++++++++++++++++++++++++++++++++++--- 1 files changed, 76 insertions(+), 5 deletions(-)
Looks right to me, but I'd feel more comfortable with a second ACK from danpb. I'm also hesitant on whether we should apply this now, or wait to post-0.9.5. While it is fixing a design flaw, it is not a regression fix, and feels more like a feature addition - if Dan could run this through his TCK suite to ensure no migration regressions, I'd feel a bit better about taking it now.
I tend to agree. The code looks reasonable, but I think we should wait till post 0.9.5, to give more time for testing. In particular the interoperability between different versions of libvirt when doing the migration Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 16.09.2011 16:53, Daniel P. Berrange wrote:
On Fri, Sep 16, 2011 at 08:35:15AM -0600, Eric Blake wrote:
On 09/16/2011 03:51 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- diff to v2: -Fix substitution in the wrong place src/qemu/qemu_migration.c | 81 ++++++++++++++++++++++++++++++++++++++++++--- 1 files changed, 76 insertions(+), 5 deletions(-)
Looks right to me, but I'd feel more comfortable with a second ACK from danpb. I'm also hesitant on whether we should apply this now, or wait to post-0.9.5. While it is fixing a design flaw, it is not a regression fix, and feels more like a feature addition - if Dan could run this through his TCK suite to ensure no migration regressions, I'd feel a bit better about taking it now.
I tend to agree. The code looks reasonable, but I think we should wait till post 0.9.5, to give more time for testing. In particular the interoperability between different versions of libvirt when doing the migration
Daniel
Okay. Fair enough. Dan, if you'll have time and run it through your TCK, that would be great. I'll hold off on pushing this until 0.9.5 is released. Michal

On 16.09.2011 17:20, Michal Privoznik wrote:
On 16.09.2011 16:53, Daniel P. Berrange wrote:
On Fri, Sep 16, 2011 at 08:35:15AM -0600, Eric Blake wrote:
On 09/16/2011 03:51 AM, Michal Privoznik wrote:
If a domain has inactive XML we want to transfer it to the destination when migrating with VIR_MIGRATE_PERSIST_DEST. In order to harm the migration protocol as little as possible, an optional cookie was chosen. --- diff to v2: -Fix substitution in the wrong place src/qemu/qemu_migration.c | 81 ++++++++++++++++++++++++++++++++++++++++++--- 1 files changed, 76 insertions(+), 5 deletions(-)
Looks right to me, but I'd feel more comfortable with a second ACK from danpb. I'm also hesitant on whether we should apply this now, or wait to post-0.9.5. While it is fixing a design flaw, it is not a regression fix, and feels more like a feature addition - if Dan could run this through his TCK suite to ensure no migration regressions, I'd feel a bit better about taking it now.
I tend to agree. The code looks reasonable, but I think we should wait till post 0.9.5, to give more time for testing. In particular the interoperability between different versions of libvirt when doing the migration
Daniel
Okay. Fair enough. Dan, if you'll have time and run it through your TCK, that would be great.
I'll hold off on pushing this until 0.9.5 is released.
Michal
-- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
So I went ahead and pushed. Thanks. Michal
participants (3)
-
Daniel P. Berrange
-
Eric Blake
-
Michal Privoznik