On Thu, May 19, 2011 at 07:24:23AM -0400, Daniel P. Berrange wrote:
Some lock managers associate state with leases, allowing a process
to temporarily release its leases and re-acquire them later, safe
in the knowledge that no other process has acquired and released
the leases in between.

This mechanism is already used across suspend/resume operations, and
must also be used across migration, so this patch passes the lock
state in the migration cookie. If the lock manager uses lock state,
it becomes compulsory to use the migration v3 protocol, which
provides the required cookie support.
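To sketch the intended round trip (the inquire call below is the one
this patch uses; the resume-side call is an assumption about the
destination's resume path, with its signature approximated from
src/locking/domain_lock.h, and is not part of this patch):

    /* Source host: ask the lock manager plugin for the domain's
     * current lease state while baking the migration cookie. */
    char *lockState = NULL;
    if (virDomainLockProcessInquire(driver->lockManager, vm, &lockState) < 0)
        return -1;
    /* lockState now travels to the destination inside the cookie. */

    /* Destination host, once the guest CPUs are restarted
     * (hypothetical call; not added by this patch): */
    if (virDomainLockProcessResume(driver->lockManager, vm, lockState) < 0)
        return -1;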
* src/qemu/qemu_driver.c: Validate that migration v2 protocol is
not used if lock manager needs state transfer
* src/qemu/qemu_migration.c: Transfer lock state in migration
cookie XML
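For reference, a cookie baked with the lockstate flag set carries a
fragment along these lines (the driver name and lease payload below
are invented for illustration):

    <qemu-migration>
      ...
      <lockstate driver='sanlock'>
        <leases>...opaque lock manager state...</leases>
      </lockstate>
    </qemu-migration>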
---
src/qemu/qemu_driver.c | 27 +++++++++-
src/qemu/qemu_migration.c | 119 +++++++++++++++++++++++++++++++++++++++++----
2 files changed, 134 insertions(+), 12 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 18233b7..6d4a6f4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5846,6 +5846,8 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
VIR_MIGRATE_NON_SHARED_DISK |
VIR_MIGRATE_NON_SHARED_INC, -1);
+ qemuDriverLock(driver);
+
if (!dom_xml) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("no domain XML passed"));
@@ -5862,13 +5864,19 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
goto cleanup;
}
- qemuDriverLock(driver);
+ if (virLockManagerPluginUsesState(driver->lockManager)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Cannot use migrate v2 protocol with lock manager
%s"),
+ virLockManagerPluginGetName(driver->lockManager));
+ goto cleanup;
+ }
+
ret = qemuMigrationPrepareTunnel(driver, dconn,
NULL, 0, NULL, NULL, /* No cookies in v2 */
st, dname, dom_xml);
- qemuDriverUnlock(driver);
cleanup:
+ qemuDriverUnlock(driver);
return ret;
}
@@ -5902,6 +5910,14 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
*uri_out = NULL;
qemuDriverLock(driver);
+
+ if (virLockManagerPluginUsesState(driver->lockManager)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Cannot use migrate v2 protocol with lock manager
%s"),
+ virLockManagerPluginGetName(driver->lockManager));
+ goto cleanup;
+ }
+
if (flags & VIR_MIGRATE_TUNNELLED) {
/* this is a logical error; we never should have gotten here with
* VIR_MIGRATE_TUNNELLED set
@@ -5956,6 +5972,13 @@ qemudDomainMigratePerform (virDomainPtr dom,
VIR_MIGRATE_NON_SHARED_INC, -1);
qemuDriverLock(driver);
+ if (virLockManagerPluginUsesState(driver->lockManager)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Cannot use migrate v2 protocol with lock manager
%s"),
+ virLockManagerPluginGetName(driver->lockManager));
+ goto cleanup;
+ }
+
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
if (!vm) {
char uuidstr[VIR_UUID_STRING_BUFLEN];
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 9e01923..72a113a 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -41,6 +41,7 @@
#include "datatypes.h"
#include "fdstream.h"
#include "uuid.h"
+#include "locking/domain_lock.h"
#define VIR_FROM_THIS VIR_FROM_QEMU
@@ -49,6 +50,7 @@
enum qemuMigrationCookieFlags {
QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
+ QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
QEMU_MIGRATION_COOKIE_FLAG_LAST
};
@@ -56,10 +58,11 @@ enum qemuMigrationCookieFlags {
VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
QEMU_MIGRATION_COOKIE_FLAG_LAST,
- "graphics");
+ "graphics", "lockstate");
enum qemuMigrationCookieFeatures {
QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
+ QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
};
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
@@ -86,6 +89,10 @@ struct _qemuMigrationCookie {
unsigned char uuid[VIR_UUID_BUFLEN];
char *name;
+ /* If (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) */
+ char *lockState;
+ char *lockDriver;
+
/* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
qemuMigrationCookieGraphicsPtr graphics;
};
@@ -110,6 +117,8 @@ static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
VIR_FREE(mig->hostname);
VIR_FREE(mig->name);
+ VIR_FREE(mig->lockState);
+ VIR_FREE(mig->lockDriver);
VIR_FREE(mig);
}
@@ -275,6 +284,41 @@ qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
}
+static int
+qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig,
+ struct qemud_driver *driver,
+ virDomainObjPtr dom)
+{
+ qemuDomainObjPrivatePtr priv = dom->privateData;
+
+ if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Migration lockstate data already present"));
+ return -1;
+ }
+
+ if (virDomainObjGetState(dom, NULL) == VIR_DOMAIN_PAUSED) {
+ if (priv->lockState &&
+ !(mig->lockState = strdup(priv->lockState)))
+ return -1;
+ } else {
+ if (virDomainLockProcessInquire(driver->lockManager, dom, &mig->lockState) < 0)
+ return -1;
+ }
+
+ if (!(mig->lockDriver = strdup(virLockManagerPluginGetName(driver->lockManager)))) {
+ VIR_FREE(mig->lockState);
+ return -1;
+ }
+
+ mig->flags |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
+ mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
+
+ return 0;
+}
+
+
+
static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
qemuMigrationCookieGraphicsPtr grap)
{
@@ -319,6 +363,15 @@ static void qemuMigrationCookieXMLFormat(virBufferPtr buf,
mig->graphics)
qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);
+ if ((mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
+ mig->lockState) {
+ virBufferAsprintf(buf, " <lockstate driver='%s'>\n",
+ mig->lockDriver);
+ virBufferAsprintf(buf, " <leases>%s</leases>\n",
+ mig->lockState);
+ virBufferAddLit(buf, " </lockstate>\n");
+ }
+
virBufferAddLit(buf, "</qemu-migration>\n");
}
@@ -498,6 +551,18 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
(!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
goto error;
+ if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
+ mig->lockDriver = virXPathString("string(./lockstate[1]/@driver)", ctxt);
+ if (!mig->lockDriver) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Missing lock driver name in migration
cookie"));
+ goto error;
+ }
+ mig->lockState = virXPathString("string(./lockstate[1]/leases[1])", ctxt);
+ if (mig->lockState && STREQ(mig->lockState, ""))
+ VIR_FREE(mig->lockState);
+ }
+
return 0;
error:
@@ -558,6 +623,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE &&
+ qemuMigrationCookieAddLockstate(mig, driver, dom) < 0)
+ return -1;
+
if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig)))
return -1;
@@ -570,7 +639,8 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
static qemuMigrationCookiePtr
-qemuMigrationEatCookie(virDomainObjPtr dom,
+qemuMigrationEatCookie(struct qemud_driver *driver,
+ virDomainObjPtr dom,
const char *cookiein,
int cookieinlen,
int flags)
@@ -596,6 +666,17 @@ qemuMigrationEatCookie(virDomainObjPtr dom,
flags) < 0)
goto error;
+ if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
+ if (STRNEQ(mig->lockDriver,
+ virLockManagerPluginGetName(driver->lockManager))) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Source host lock driver %s different from target
%s"),
+ mig->lockDriver,
+ virLockManagerPluginGetName(driver->lockManager));
+ goto error;
+ }
+ }
+
return mig;
error:
@@ -885,12 +966,12 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
if (!qemuMigrationIsAllowed(vm->def))
goto cleanup;
- if (!(mig = qemuMigrationEatCookie(vm, NULL, 0, 0)))
+ if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
goto cleanup;
if (qemuMigrationBakeCookie(mig, driver, vm,
cookieout, cookieoutlen,
- 0) < 0)
+ QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
goto cleanup;
rv = qemuDomainFormatXML(driver, vm,
@@ -964,7 +1045,8 @@ qemuMigrationPrepareTunnel(struct qemud_driver *driver,
def = NULL;
priv = vm->privateData;
- if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+ QEMU_MIGRATION_COOKIE_LOCKSTATE)))
goto cleanup;
if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
@@ -1193,7 +1275,8 @@ qemuMigrationPrepareDirect(struct qemud_driver *driver,
def = NULL;
priv = vm->privateData;
- if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+ QEMU_MIGRATION_COOKIE_LOCKSTATE)))
goto cleanup;
if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
@@ -1287,7 +1370,15 @@ static int doNativeMigrate(struct qemud_driver *driver,
unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
qemuMigrationCookiePtr mig = NULL;
- if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
+ if (virLockManagerPluginUsesState(driver->lockManager) &&
+ !cookieout) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Migration with lock driver %s requires cookie
support"),
+ virLockManagerPluginGetName(driver->lockManager));
+ return -1;
+ }
+
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_GRAPHICS)))
goto cleanup;
@@ -1473,6 +1564,14 @@ static int doTunnelMigrate(struct qemud_driver *driver,
qemuMigrationCookiePtr mig = NULL;
qemuMigrationIOThreadPtr iothread = NULL;
+ if (virLockManagerPluginUsesState(driver->lockManager) &&
+ !cookieout) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Migration with lock driver %s requires cookie
support"),
+ virLockManagerPluginGetName(driver->lockManager));
+ return -1;
+ }
+
if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
qemuReportError(VIR_ERR_OPERATION_FAILED,
@@ -1532,7 +1631,7 @@ static int doTunnelMigrate(struct qemud_driver *driver,
goto cleanup;
}
- if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen,
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_GRAPHICS)))
goto cleanup;
@@ -2192,7 +2291,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
priv->jobActive = QEMU_JOB_NONE;
memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
- if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
goto cleanup;
if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
@@ -2313,7 +2412,7 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
virDomainEventPtr event = NULL;
int rv = -1;
- if (!(mig = qemuMigrationEatCookie(vm, cookiein, cookieinlen, 0)))
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
return -1;
if (!skipJob &&
ACK, looks fine.
But I'm wondering about the scenario where a networked resource
happens to be used from two different nodes by different domains, and
one of them gets migrated to the same box: the lock manager should
detect the problem and the migration will fail, but the pre-existing
broken "double use" will continue, right?
Daniel
--
Daniel Veillard      | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
daniel(a)veillard.com | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/