On Sep 6, 2012, at 10:58 AM, Laine Stump wrote:
On 09/04/2012 04:35 PM, Kyle Mestery wrote:
> Add the ability to migrate per-port data on Open vSwitch
> ports during qemu live migration. A controller can use this
> to store data relating to each port, and have it migrated
> with the virtual machine and populated on the destination
> host.
Good job in figuring this out!
>
> Signed-off-by: Kyle Mestery <kmestery(a)cisco.com>
> Cc: Laine Stump <laine(a)laine.org>
> ---
> src/qemu/qemu_migration.c | 246 +++++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 244 insertions(+), 2 deletions(-)
>
> diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
> index 1b21ef6..8c1a8f1 100644
> --- a/src/qemu/qemu_migration.c
> +++ b/src/qemu/qemu_migration.c
> @@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags {
> QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
> QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
> QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
> + QEMU_MIGRATION_COOKIE_FLAG_OVS_PORT_DATA,
>
> QEMU_MIGRATION_COOKIE_FLAG_LAST
> };
> @@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags {
> VIR_ENUM_DECL(qemuMigrationCookieFlag);
> VIR_ENUM_IMPL(qemuMigrationCookieFlag,
> QEMU_MIGRATION_COOKIE_FLAG_LAST,
> - "graphics", "lockstate", "persistent");
> + "graphics", "lockstate", "persistent",
"ovsportdata");
>
> enum qemuMigrationCookieFeatures {
> QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
> QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
> QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
> + QEMU_MIGRATION_COOKIE_OVS_PORT_DATA = (1 << QEMU_MIGRATION_COOKIE_FLAG_OVS_PORT_DATA),
> };
>
> typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
> @@ -95,6 +97,19 @@ struct _qemuMigrationCookieGraphics {
> char *tlsSubject;
> };
>
> +typedef struct _qemuMigrationCookieOvsPortData qemuMigrationCookieOvsPortData;
> +typedef qemuMigrationCookieOvsPortData *qemuMigrationCookieOvsPortDataPtr;
> +struct _qemuMigrationCookieOvsPortData {
> + /* How many virtual NICs are we saving data for? */
> + int nnets;
> +
> + /*
> + * Array of pointers to saved data. Each VIF will have its own
> + * data to transfer.
> + */
> + char **portdata;
> +};
> +
> typedef struct _qemuMigrationCookie qemuMigrationCookie;
> typedef qemuMigrationCookie *qemuMigrationCookiePtr;
> struct _qemuMigrationCookie {
> @@ -120,6 +135,9 @@ struct _qemuMigrationCookie {
>
> /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */
> virDomainDefPtr persistent;
> +
> + /* If (flags & QEMU_MIGRATION_COOKIE_OVS_PORT_DATA) */
> + qemuMigrationCookieOvsPortDataPtr ovsportdata;
> };
>
> static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
> @@ -132,6 +150,24 @@ static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
> }
>
>
> +static void qemuMigrationCookieOvsPortDataFree(qemuMigrationCookieOvsPortDataPtr
> + ovsportdata)
> +{
> + int i;
> +
> + if (!ovsportdata)
> + return;
> +
> + for (i = 0; i < ovsportdata->nnets; i++) {
> + VIR_FREE(ovsportdata->portdata[i]);
> + }
> +
> + VIR_FREE(ovsportdata->portdata);
> +
> + VIR_FREE(ovsportdata);
> +}
> +
> +
> static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
> {
> if (!mig)
> @@ -140,6 +176,10 @@ static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
> if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS)
> qemuMigrationCookieGraphicsFree(mig->graphics);
>
> + if (mig->flags & QEMU_MIGRATION_COOKIE_OVS_PORT_DATA) {
> + qemuMigrationCookieOvsPortDataFree(mig->ovsportdata);
> + }
> +
> VIR_FREE(mig->localHostname);
> VIR_FREE(mig->remoteHostname);
> VIR_FREE(mig->name);
> @@ -256,6 +296,60 @@ error:
> }
>
>
> +static qemuMigrationCookieOvsPortDataPtr
> +qemuMigrationCookieOvsPortDataAlloc(struct qemud_driver *driver ATTRIBUTE_UNUSED,
> + virDomainDefPtr def)
> +{
> + qemuMigrationCookieOvsPortDataPtr mig;
> + int i;
> + virCommandPtr cmd = NULL;
> + virDomainNetDefPtr netptr;
> +
> + if (VIR_ALLOC(mig) < 0)
> + goto no_memory;
> +
> + mig->nnets = def->nnets;
> +
> + if (VIR_ALLOC_N(mig->portdata, def->nnets) < 0)
> + goto no_memory;
> +
> + for (i = 0; i < def->nnets; i++) {
> + virNetDevVPortProfilePtr vport = virDomainNetGetActualVirtPortProfile(def->nets[i]);
> + netptr = def->nets[i];
> +
> + if (vport && vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH) {
> + if (VIR_ALLOC(mig->portdata[i]) < 0)
> + goto no_memory;
> +
> + cmd = virCommandNewArgList(OVSVSCTL, "get",
"Interface",
> + netptr->ifname,
"external_ids:PortData",
> + NULL);
> +
> + virCommandSetOutputBuffer(cmd, &mig->portdata[i]);
> +
> + /* Run the command */
> + if (virCommandRun(cmd, NULL) < 0) {
> + virReportSystemError(VIR_ERR_INTERNAL_ERROR,
> + _("Unable to run command to get OVS port
data for "
> + "interface %s"), netptr->ifname);
> + goto error;
> + }
> +
> + /* Wipe out the trailing newline */
> + mig->portdata[i][strlen(mig->portdata[i]) - 1] = '\0';
Both this run of ovs-vsctl and the one that re-populates the data at the
remote end should be done by functions in virnetdevopenvswitch.c
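Something along the lines of this rough, untested sketch is what I mean -- the
name and signature are only a suggestion, not an existing API, and it assumes
the usual includes of virnetdevopenvswitch.c (vircommand, error reporting,
OVSVSCTL) are already in place:

/* Untested sketch for src/util/virnetdevopenvswitch.c */
int virNetDevOpenvswitchGetMigrateData(char **migrate, const char *ifname)
{
    virCommandPtr cmd = NULL;
    int ret = -1;

    cmd = virCommandNewArgList(OVSVSCTL, "get", "Interface", ifname,
                               "external_ids:PortData", NULL);
    virCommandSetOutputBuffer(cmd, migrate);

    if (virCommandRun(cmd, NULL) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Unable to run command to get OVS port data for "
                         "interface %s"), ifname);
        goto cleanup;
    }

    /* ovs-vsctl terminates its output with a newline; strip it */
    if (*migrate && **migrate &&
        (*migrate)[strlen(*migrate) - 1] == '\n')
        (*migrate)[strlen(*migrate) - 1] = '\0';

    ret = 0;

cleanup:
    virCommandFree(cmd);
    return ret;
}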
Hi Laine:
I'm in the process of reworking this patch along the lines of the input you and Daniel
have provided. I defined some helper functions in virnetdevopenvswitch.c, but when I
call them from qemu_migration.c the build fails during linking. I suspect I need to
link whatever gets built out of src/util into the qemu pieces, but before going down
that path I wanted to run this by the list. Here is the error I see:
make[3]: Leaving directory `/production/git/local/libvirt/libvirt/python'
Making all in tests
make[3]: Entering directory `/production/git/local/libvirt/libvirt/python/tests'
make[3]: Nothing to be done for `all'.
make[3]: Leaving directory `/production/git/local/libvirt/libvirt/python/tests'
make[2]: Leaving directory `/production/git/local/libvirt/libvirt/python'
Making all in tests
make[2]: Entering directory `/production/git/local/libvirt/libvirt/tests'
CCLD qemuxml2argvtest
../src/.libs/libvirt_driver_qemu_impl.a(libvirt_driver_qemu_impl_la-qemu_migration.o): In function `qemuMigrationCookieNetworkAlloc':
/production/git/local/libvirt/libvirt/src/qemu/qemu_migration.c:343: undefined reference to `virNetDevOpenvswitchGetMigrateData'
../src/.libs/libvirt_driver_qemu_impl.a(libvirt_driver_qemu_impl_la-qemu_migration.o): In function `qemuDomainMigrateOPDRelocate':
/production/git/local/libvirt/libvirt/src/qemu/qemu_migration.c:1302: undefined reference to `virNetDevOpenvswitchSetMigrateData'
collect2: error: ld returned 1 exit status
make[2]: *** [qemuxml2argvtest] Error 1
make[2]: Leaving directory `/production/git/local/libvirt/libvirt/tests'
make[1]: *** [all-recursive] Error 1
make[1]: Leaving directory `/production/git/local/libvirt/libvirt'
make: *** [all] Error 2
[kmestery@fedora-build libvirt]$
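(The two helpers the linker is complaining about are declared in
virnetdevopenvswitch.h roughly as below -- the signatures shown here are
approximate and are given only for context; they shouldn't matter for the
link error itself:)

/* approximate declarations, src/util/virnetdevopenvswitch.h */
int virNetDevOpenvswitchGetMigrateData(char **migrate, const char *ifname);
int virNetDevOpenvswitchSetMigrateData(char *migrate, const char *ifname);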
Any pointers appreciated! Once I get this addressed, I think the new patches are
ready for submission.
Thanks!
Kyle
> + }
> + }
> +
> + return mig;
> +
> +no_memory:
> + virReportOOMError();
> +error:
> + qemuMigrationCookieOvsPortDataFree(mig);
> + return NULL;
> +}
> +
> +
> static qemuMigrationCookiePtr
> qemuMigrationCookieNew(virDomainObjPtr dom)
> {
> @@ -370,6 +464,27 @@ qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig,
> }
>
>
> +static int
> +qemuMigrationCookieAddOvsPortData(qemuMigrationCookiePtr mig,
> + struct qemud_driver *driver,
> + virDomainObjPtr dom)
> +{
> + if (mig->flags & QEMU_MIGRATION_COOKIE_OVS_PORT_DATA) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("Migration ovs port data data already
present"));
> + return -1;
> + }
> +
> + if (dom->def->nnets >= 1) {
> + if (!(mig->ovsportdata = qemuMigrationCookieOvsPortDataAlloc(
> + driver, dom->def)))
> + return -1;
> + mig->flags |= QEMU_MIGRATION_COOKIE_OVS_PORT_DATA;
> + }
> +
> + return 0;
> +}
> +
>
> static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
> qemuMigrationCookieGraphicsPtr grap)
> @@ -389,6 +504,25 @@ static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
> }
>
>
> +static void qemuMigrationCookieOvsPortDataXMLFormat(virBufferPtr buf,
> + qemuMigrationCookieOvsPortDataPtr optr)
> +{
> + int i;
> +
> + virBufferAsprintf(buf, " <ovsportdata nnets='%d'>\n",
optr->nnets);
A minor point - is there a reason for including nnets here, when it can
be implied from the number of <vif> elements and/or their "num"
attributes?
But beyond that, I'm thinking we might want a more generic toplevel
element name (similar to the graphics one being called <graphics> even
though it could be used for vnc or spice), so that in the future it
could be passed to / retrieved from a separate network driver/daemon
without regard (within the migration code at least) to the specific type
of the interface (or at least allow for the migration code to get
multiple types without needing to clutter the namespace).
In the end, what we're really doing here is sending a last-instant
update to the state of the domain's network interface. Thinking of it in
that way, it would be really nice if it mimicked the <network> element
of the domain XML as much as possible, to make explanations easier and
make future additions more automatic, but I'm not sure how far we want
to go in that direction, since the <network> element of the domain is
unnecessarily complex for this specific job (e.g. it has both the
configuration (which could be type='network') *and* the actual network
connection that's used (which could have a different type if the config
type is 'network')). Another difference specific to this case is that, in
order to detect an interface that uses Open vSwitch, you need to look
both for type='bridge' and for <virtualport type='openvswitch'>.
So, to take this to the very end of that road of reasoning, how bad of
an idea would it be to make the "cookie" for each interface be the full
live XML of that interface, then just include the portdata inside
<virtualport> for every interface that has <virtualport
type='openvswitch'>?
The advantage of doing it this way is that this would automatically
handle any other need for last-minute interface state change info we
might encounter in the future (as long as 1) the desired information is
included in virDomainNetDef and handled by the standard
parser/formatter, and 2) the cookie eating code did something with that
data), and we could just call the parse/format functions in src/conf
rather than needing our own. (A side effect is that "virsh dumpxml
$myguest" would display the portdata for every Open vSwitch interface.
Not having seen the length of the data, I don't know if that's a plus or
a minus :-)
(Note that this would mean you would need to call
virDomainNetGetActualVirtPortProfile() on the receiving end)
(really, I can see a potential need for this for other types of devices
that need up-to-the-last-instant state info sent to the destination. But
of course the more far-reaching you try to be, the bigger the chance
you'll get it wrong :-P)
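To make that concrete, the format side could shrink to something like this
rough sketch -- untested, the helper name is invented, and it assumes the
per-interface formatter in src/conf (virDomainNetDefFormat, or whatever it
ends up being called) can be called from here with flags that make it emit
the actual/live connection info:

/* Untested sketch: reuse the src/conf interface formatter instead of a
 * hand-rolled <ovsportdata> element. Error handling and the exact flags
 * needed to include the live connection details are glossed over. */
static void
qemuMigrationCookieNetworkXMLFormat(virBufferPtr buf, virDomainDefPtr def)
{
    int i;

    virBufferAddLit(buf, "  <network>\n");
    for (i = 0; i < def->nnets; i++)
        virDomainNetDefFormat(buf, def->nets[i], 0 /* flags? */);
    virBufferAddLit(buf, "  </network>\n");
}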
> + if (optr->nnets > 0)
> + virBufferAsprintf(buf, " <vifs>\n");
Stepping back from my larger change idea above, does having the extra
<vifs> level in the hierarchy make something easier (either now or in a
hypothetical future with more data sent in this cookie)? Or could we
just have the <vif> elements right below the toplevel?
Oh, and just to shake things up - we need to account for the possibility
where the source host is using Open vSwitch, and the destination isn't :-)
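(A fragment of what I mean for the destination side -- before applying any
portdata, just skip interfaces that aren't actually connected to Open vSwitch
on this host; this is only a sketch of the check, not a complete fix:)

        /* inside the per-interface loop in the relocate function: ignore
         * interfaces that aren't Open vSwitch ports on the destination */
        virNetDevVPortProfilePtr vport =
            virDomainNetGetActualVirtPortProfile(vm->def->nets[i]);

        if (!vport ||
            vport->virtPortType != VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH)
            continue;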
> + for (i = 0; i < optr->nnets; i++) {
> + virBufferAsprintf(buf, " <vif num='%d'
portdata='%s'/>\n",
> + i, optr->portdata[i]);
> + }
> + if (optr->nnets > 0)
> + virBufferAsprintf(buf, " </vifs>\n");
> +
> + virBufferAddLit(buf, " </ovsportdata>\n");
> +}
> +
> +
> static int
> qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
> virBufferPtr buf,
> @@ -439,6 +573,10 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
> virBufferAdjustIndent(buf, -2);
> }
>
> + if ((mig->flags & QEMU_MIGRATION_COOKIE_OVS_PORT_DATA) &&
> + mig->ovsportdata)
> + qemuMigrationCookieOvsPortDataXMLFormat(buf, mig->ovsportdata);
> +
> virBufferAddLit(buf, "</qemu-migration>\n");
> return 0;
> }
> @@ -516,6 +654,57 @@ error:
> }
>
>
> +static qemuMigrationCookieOvsPortDataPtr
> +qemuMigrationCookieOvsPortDataXMLParse(xmlXPathContextPtr ctxt)
> +{
> + qemuMigrationCookieOvsPortDataPtr optr;
> + int i;
> + int n;
> + xmlNodePtr *vifs = NULL;
> +
> + if (VIR_ALLOC(optr) < 0)
> + goto no_memory;
> +
> + if (virXPathInt("string(./ovsportdata/@nnets)", ctxt,
&optr->nnets) < 0) {
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + "%s", _("missing nnets attribute in migration
data"));
> + goto error;
> + }
> +
> + if (VIR_ALLOC_N(optr->portdata, optr->nnets) < 0)
> + goto no_memory;
> +
> + if ((n = virXPathNodeSet("./ovsportdata/vifs/vif", ctxt, &vifs)) < 0) {
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + "%s", _("missing vif information"));
> + goto error;
> + }
> +
> + for (i = 0; i < n; i++) {
> + if (VIR_ALLOC(optr->portdata[i]) < 0)
> + goto no_memory;
> +
> + if (!(optr->portdata[i] = virXMLPropString(vifs[i], "portdata"))) {
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + "%s", _("missing portdata attribute in
migration data"));
> + goto error;
> + }
> + }
> +
> + VIR_FREE(vifs);
> +
> + return optr;
> +
> +no_memory:
> + virReportOOMError();
> +error:
> + if (vifs)
> + VIR_FREE(vifs);
> + qemuMigrationCookieOvsPortDataFree(optr);
> + return NULL;
> +}
> +
> +
> static int
> qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
> struct qemud_driver *driver,
> @@ -662,6 +851,11 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
> VIR_FREE(nodes);
> }
>
> + if ((flags & QEMU_MIGRATION_COOKIE_OVS_PORT_DATA) &&
> + virXPathBoolean("count(./ovsportdata) > 0", ctxt) &&
> + (!(mig->ovsportdata = qemuMigrationCookieOvsPortDataXMLParse(ctxt))))
> + goto error;
> +
> return 0;
>
> error:
> @@ -721,6 +915,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
> qemuMigrationCookieAddPersistent(mig, dom) < 0)
> return -1;
>
> + if (flags & QEMU_MIGRATION_COOKIE_OVS_PORT_DATA &&
> + qemuMigrationCookieAddOvsPortData(mig, driver, dom) < 0)
> + return -1;
> +
> if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
> return -1;
>
> @@ -1050,6 +1248,44 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver,
> }
>
>
> +static int
> +qemuDomainMigrateOPDRelocate(struct qemud_driver *driver ATTRIBUTE_UNUSED,
> + virDomainObjPtr vm,
> + qemuMigrationCookiePtr cookie)
> +{
> + virCommandPtr cmd = NULL;
> + virDomainNetDefPtr netptr;
> + int ret = 0;
> + int i;
> + virBufferPtr buf;
> +
> + if (VIR_ALLOC(buf) < 0) {
> + ret = -1;
> + goto out;
> + }
> +
> + for (i = 0; i < cookie->ovsportdata->nnets; i++) {
> + netptr = vm->def->nets[i];
> +
> + virBufferAsprintf(buf, "external_ids:PortData=%s",
> + cookie->ovsportdata->portdata[i]);
> +
> + cmd = virCommandNewArgList(OVSVSCTL, "set",
"Interface",
> + netptr->ifname, virBufferCurrentContent(buf),
> + NULL);
> + /* Run the command */
> + if ((ret = virCommandRun(cmd, NULL)) < 0) {
> + virReportSystemError(VIR_ERR_INTERNAL_ERROR,
> + _("Unable to run command to set OVS port data
for "
> + "interface %s"),
vm->def->nets[i]->ifname);
> + }
> + }
> +
> +out:
> + return ret;
> +}
> +
> +
> /* This is called for outgoing non-p2p migrations when a connection to the
> * client which initiated the migration was closed but we were waiting for it
> * to follow up with the next phase, that is, in between
> @@ -1994,7 +2230,8 @@ cleanup:
>
> if (ret == 0 &&
> qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
> - QEMU_MIGRATION_COOKIE_PERSISTENT ) < 0)
> + QEMU_MIGRATION_COOKIE_PERSISTENT |
> + QEMU_MIGRATION_COOKIE_OVS_PORT_DATA) < 0)
> VIR_WARN("Unable to encode migration cookie");
>
> qemuMigrationCookieFree(mig);
> @@ -2929,6 +3166,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
>
> qemuDomainCleanupRemove(vm, qemuMigrationPrepareCleanup);
>
> + cookie_flags = QEMU_MIGRATION_COOKIE_OVS_PORT_DATA;
> if (flags & VIR_MIGRATE_PERSIST_DEST)
> cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
>
> @@ -2956,6 +3194,10 @@ qemuMigrationFinish(struct qemud_driver *driver,
> goto endjob;
> }
>
> + if (mig->ovsportdata)
> + if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
> + VIR_WARN("unable to provide data for OVS port data
relocation");
> +
> if (flags & VIR_MIGRATE_PERSIST_DEST) {
> virDomainDefPtr vmdef;
> if (vm->persistent)
In general ACK to the idea, but I think we should try to set up a more
general framework so that other types of network connections can plug in
their own data without needing to start over from scratch (and also to
isolate things like calling "ovs-vsctl" into virnetdevopenvswitch.c).
I would like to hear others' opinions on whether or not they think it's
reasonable to include the entire <network> live XML in the cookie, or if
they think that's going beyond the bounds of the intended purpose of the
migration cookie. I don't know if we should go all the way in that
direction, but possibly we should at least make it a *subset* of the
full domain <network> xml (as is done with the <graphics> cookie).