The introduction of the v3 migration protocol, along with
support for migration cookies, will significantly expand
the size of the migration code. Move it all to a separate
file to make it more manageable.
The functions are not moved verbatim. The API entry points
remain in the main QEMU driver, but once the public
virDomainPtr is resolved to the internal virDomainObjPtr,
all subsequent code is moved and renamed under a qemuMigration
prefix (e.g. qemuDomainWaitForMigrationComplete becomes
qemuMigrationWaitForCompletion).
This will allow the new v3 API entry points to call into the
same shared internal migration functions, as sketched below.
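
For illustration, the resulting split follows this call pattern.
This is a condensed sketch based on the diff below (error
reporting and flag checking are elided), not a verbatim excerpt:

    /* Public API entry point: stays in qemu_driver.c. It only
     * resolves the virDomainPtr to the virDomainObjPtr and then
     * delegates to the shared code in qemu_migration.c. */
    static int
    qemudDomainMigratePerform(virDomainPtr dom,
                              const char *cookie ATTRIBUTE_UNUSED,
                              int cookielen ATTRIBUTE_UNUSED,
                              const char *uri,
                              unsigned long flags,
                              const char *dname,
                              unsigned long resource)
    {
        struct qemud_driver *driver = dom->conn->privateData;
        virDomainObjPtr vm;
        int ret = -1;

        qemuDriverLock(driver);
        vm = virDomainFindByUUID(&driver->domains, dom->uuid);
        if (!vm)
            goto cleanup;

        /* Shared internal implementation, reusable by the
         * future v3 entry points. */
        ret = qemuMigrationPerform(driver, dom->conn, vm,
                                   uri, flags, dname, resource);

    cleanup:
        qemuDriverUnlock(driver);
        return ret;
    }

The Prepare, PrepareTunnel and Finish entry points follow the
same pattern.
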
* src/qemu/qemu_domain.c, src/qemu/qemu_domain.h: Add
qemuDomainFormatXML helper method.
* src/qemu/qemu_driver.c: Remove all migration code.
* src/qemu/qemu_migration.c, src/qemu/qemu_migration.h: Add
all migration code.
---
po/POTFILES.in | 1 +
src/Makefile.am | 3 +-
src/qemu/qemu_domain.c | 39 ++
src/qemu/qemu_domain.h | 4 +
src/qemu/qemu_driver.c | 1297 ++-------------------------------------------
src/qemu/qemu_migration.c | 1295 ++++++++++++++++++++++++++++++++++++++++++++
src/qemu/qemu_migration.h | 63 +++
7 files changed, 1445 insertions(+), 1257 deletions(-)
create mode 100644 src/qemu/qemu_migration.c
create mode 100644 src/qemu/qemu_migration.h
diff --git a/po/POTFILES.in b/po/POTFILES.in
index 343fe5d..2256cb2 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -58,6 +58,7 @@ src/qemu/qemu_domain.c
src/qemu/qemu_driver.c
src/qemu/qemu_hostdev.c
src/qemu/qemu_hotplug.c
+src/qemu/qemu_migration.c
src/qemu/qemu_monitor.c
src/qemu/qemu_monitor_json.c
src/qemu/qemu_monitor_text.c
diff --git a/src/Makefile.am b/src/Makefile.am
index 15a4e8c..36e08a0 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -281,7 +281,8 @@ QEMU_DRIVER_SOURCES = \
qemu/qemu_hostdev.c qemu/qemu_hostdev.h \
qemu/qemu_hotplug.c qemu/qemu_hotplug.h \
qemu/qemu_conf.c qemu/qemu_conf.h \
- qemu/qemu_process.c qemu/qemu_process.h \
+ qemu/qemu_process.c qemu/qemu_process.h \
+ qemu/qemu_migration.c qemu/qemu_migration.h \
qemu/qemu_monitor.c qemu/qemu_monitor.h \
qemu/qemu_monitor_text.c \
qemu/qemu_monitor_text.h \
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index e3163ab..8a2b9cc 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -30,6 +30,7 @@
#include "virterror_internal.h"
#include "c-ctype.h"
#include "event.h"
+#include "cpu/cpu.h"
#include <sys/time.h>
@@ -653,3 +654,41 @@ void qemuDomainObjExitRemoteWithDriver(struct qemud_driver *driver,
virDomainObjLock(obj);
virDomainObjUnref(obj);
}
+
+
+char *qemuDomainFormatXML(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ int flags)
+{
+ char *ret = NULL;
+ virCPUDefPtr cpu = NULL;
+ virDomainDefPtr def;
+ virCPUDefPtr def_cpu;
+
+ if ((flags & VIR_DOMAIN_XML_INACTIVE) && vm->newDef)
+ def = vm->newDef;
+ else
+ def = vm->def;
+ def_cpu = def->cpu;
+
+ /* Update guest CPU requirements according to host CPU */
+ if ((flags & VIR_DOMAIN_XML_UPDATE_CPU) && def_cpu && def_cpu->model) {
+ if (!driver->caps || !driver->caps->host.cpu) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("cannot get host CPU
capabilities"));
+ goto cleanup;
+ }
+
+ if (!(cpu = virCPUDefCopy(def_cpu))
+ || cpuUpdate(cpu, driver->caps->host.cpu))
+ goto cleanup;
+ def->cpu = cpu;
+ }
+
+ ret = virDomainDefFormat(def, flags);
+
+cleanup:
+ def->cpu = def_cpu;
+ virCPUDefFree(cpu);
+ return ret;
+}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 4333a78..ebb2050 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -107,4 +107,8 @@ void qemuDomainObjEnterRemoteWithDriver(struct qemud_driver *driver,
void qemuDomainObjExitRemoteWithDriver(struct qemud_driver *driver,
virDomainObjPtr obj);
+char *qemuDomainFormatXML(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ int flags);
+
#endif /* __QEMU_DOMAIN_H__ */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 9cc6e89..21e88f8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -57,6 +57,7 @@
#include "qemu_bridge_filter.h"
#include "qemu_audit.h"
#include "qemu_process.h"
+#include "qemu_migration.h"
#include "virterror_internal.h"
#include "logging.h"
@@ -1691,176 +1692,6 @@ cleanup:
}
-/** qemuDomainMigrateOffline:
- * Pause domain for non-live migration.
- */
-static int
-qemuDomainMigrateOffline(struct qemud_driver *driver,
- virDomainObjPtr vm)
-{
- int ret;
-
- ret = qemuProcessStopCPUs(driver, vm);
- if (ret == 0) {
- virDomainEventPtr event;
-
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_SUSPENDED,
- VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
- if (event)
- qemuDomainEventQueue(driver, event);
- }
-
- return ret;
-}
-
-
-static int
-qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr vm)
-{
- int ret = -1;
- int status;
- unsigned long long memProcessed;
- unsigned long long memRemaining;
- unsigned long long memTotal;
- qemuDomainObjPrivatePtr priv = vm->privateData;
-
- priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
-
- while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
- /* Poll every 50ms for progress & to allow cancellation */
- struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
- struct timeval now;
- int rc;
- const char *job;
-
- switch (priv->jobActive) {
- case QEMU_JOB_MIGRATION_OUT:
- job = _("migration job");
- break;
- case QEMU_JOB_SAVE:
- job = _("domain save job");
- break;
- case QEMU_JOB_DUMP:
- job = _("domain core dump job");
- break;
- default:
- job = _("job");
- }
-
-
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
- job, _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
- priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
- VIR_DEBUG0("Cancelling job at client request");
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- rc = qemuMonitorMigrateCancel(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (rc < 0) {
- VIR_WARN0("Unable to cancel job");
- }
- } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
- priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
- VIR_DEBUG0("Pausing domain for non-live migration");
- if (qemuDomainMigrateOffline(driver, vm) < 0)
- VIR_WARN0("Unable to pause domain");
- } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
- unsigned long long ms = priv->jobSignalsData.migrateDowntime;
-
- priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
- priv->jobSignalsData.migrateDowntime = 0;
- VIR_DEBUG("Setting migration downtime to %llums", ms);
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- rc = qemuMonitorSetMigrationDowntime(priv->mon, ms);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (rc < 0)
- VIR_WARN0("Unable to set migration downtime");
- }
-
- /* Repeat check because the job signals might have caused
- * guest to die
- */
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
- job, _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- rc = qemuMonitorGetMigrationStatus(priv->mon,
- &status,
- &memProcessed,
- &memRemaining,
- &memTotal);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
-
- if (rc < 0) {
- priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
- goto cleanup;
- }
-
- if (gettimeofday(&now, NULL) < 0) {
- priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
- virReportSystemError(errno, "%s",
- _("cannot get time of day"));
- goto cleanup;
- }
- priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart;
-
- switch (status) {
- case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
- priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- _("%s: %s"), job, _("is not active"));
- break;
-
- case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
- priv->jobInfo.dataTotal = memTotal;
- priv->jobInfo.dataRemaining = memRemaining;
- priv->jobInfo.dataProcessed = memProcessed;
-
- priv->jobInfo.memTotal = memTotal;
- priv->jobInfo.memRemaining = memRemaining;
- priv->jobInfo.memProcessed = memProcessed;
- break;
-
- case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
- priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
- ret = 0;
- break;
-
- case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
- priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- _("%s: %s"), job, _("unexpectedly
failed"));
- break;
-
- case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
- priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- _("%s: %s"), job, _("canceled by
client"));
- break;
- }
-
- virDomainObjUnlock(vm);
- qemuDriverUnlock(driver);
-
- nanosleep(&ts, NULL);
-
- qemuDriverLock(driver);
- virDomainObjLock(vm);
- }
-
-cleanup:
- return ret;
-}
-
-
#define QEMUD_SAVE_MAGIC "LibvirtQemudSave"
#define QEMUD_SAVE_VERSION 2
@@ -2161,7 +1992,7 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver, virDomainPtr dom,
if (rc < 0)
goto endjob;
- rc = qemuDomainWaitForMigrationComplete(driver, vm);
+ rc = qemuMigrationWaitForCompletion(driver, vm);
if (rc < 0)
goto endjob;
@@ -2469,7 +2300,7 @@ static int doCoreDump(struct qemud_driver *driver,
if (ret < 0)
goto cleanup;
- ret = qemuDomainWaitForMigrationComplete(driver, vm);
+ ret = qemuMigrationWaitForCompletion(driver, vm);
if (ret < 0)
goto cleanup;
@@ -3605,44 +3436,6 @@ cleanup:
}
-static char *qemudVMDumpXML(struct qemud_driver *driver,
- virDomainObjPtr vm,
- int flags)
-{
- char *ret = NULL;
- virCPUDefPtr cpu = NULL;
- virDomainDefPtr def;
- virCPUDefPtr def_cpu;
-
- if ((flags & VIR_DOMAIN_XML_INACTIVE) && vm->newDef)
- def = vm->newDef;
- else
- def = vm->def;
- def_cpu = def->cpu;
-
- /* Update guest CPU requirements according to host CPU */
- if ((flags & VIR_DOMAIN_XML_UPDATE_CPU) && def_cpu && def_cpu->model) {
- if (!driver->caps || !driver->caps->host.cpu) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("cannot get host CPU
capabilities"));
- goto cleanup;
- }
-
- if (!(cpu = virCPUDefCopy(def_cpu))
- || cpuUpdate(cpu, driver->caps->host.cpu))
- goto cleanup;
- def->cpu = cpu;
- }
-
- ret = virDomainDefFormat(def, flags);
-
-cleanup:
- def->cpu = def_cpu;
- virCPUDefFree(cpu);
- return ret;
-}
-
-
static char *qemudDomainDumpXML(virDomainPtr dom,
int flags) {
struct qemud_driver *driver = dom->conn->privateData;
@@ -3688,7 +3481,7 @@ static char *qemudDomainDumpXML(virDomainPtr dom,
}
}
- ret = qemudVMDumpXML(driver, vm, flags);
+ ret = qemuDomainFormatXML(driver, vm, flags);
cleanup:
if (vm)
@@ -5552,18 +5345,6 @@ qemuDomainEventDeregisterAny(virConnectPtr conn,
/* Migration support. */
-static bool ATTRIBUTE_NONNULL(1)
-qemuDomainIsMigratable(virDomainDefPtr def)
-{
- if (def->nhostdevs > 0) {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("Domain with assigned host devices cannot be
migrated"));
- return false;
- }
-
- return true;
-}
-
/* Prepare is the first step, and it runs on the destination host.
*
* This version starts an empty VM listening on a localhost TCP port, and
@@ -5578,24 +5359,8 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
const char *dom_xml)
{
struct qemud_driver *driver = dconn->privateData;
- virDomainDefPtr def = NULL;
- virDomainObjPtr vm = NULL;
- char *migrateFrom;
- virDomainEventPtr event = NULL;
int ret = -1;
- int internalret;
- char *unixfile = NULL;
- unsigned long long qemuCmdFlags;
- qemuDomainObjPrivatePtr priv = NULL;
- struct timeval now;
-
- if (gettimeofday(&now, NULL) < 0) {
- virReportSystemError(errno, "%s",
- _("cannot get time of day"));
- return -1;
- }
- qemuDriverLock(driver);
if (!dom_xml) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("no domain XML passed"));
@@ -5612,140 +5377,12 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
goto cleanup;
}
- /* Parse the domain XML. */
- if (!(def = virDomainDefParseString(driver->caps, dom_xml,
- VIR_DOMAIN_XML_INACTIVE))) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("failed to parse XML, libvirt version may
be "
- "different between source and destination
host"));
- goto cleanup;
- }
-
- if (!qemuDomainIsMigratable(def))
- goto cleanup;
-
- /* Target domain name, maybe renamed. */
- if (dname) {
- VIR_FREE(def->name);
- def->name = strdup(dname);
- if (def->name == NULL)
- goto cleanup;
- }
-
- if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
- goto cleanup;
-
- if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains,
- def, true))) {
- /* virDomainAssignDef already set the error */
- goto cleanup;
- }
- def = NULL;
- priv = vm->privateData;
-
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
- goto cleanup;
- priv->jobActive = QEMU_JOB_MIGRATION_OUT;
-
- /* Domain starts inactive, even if the domain XML had an id field. */
- vm->def->id = -1;
-
- if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.dest.%s",
- driver->libDir, vm->def->name) < 0) {
- virReportOOMError();
- goto endjob;
- }
- unlink(unixfile);
-
- /* check that this qemu version supports the interactive exec */
- if (qemuCapsExtractVersionInfo(vm->def->emulator, NULL, &qemuCmdFlags) < 0) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR,
- _("Cannot determine QEMU argv syntax %s"),
- vm->def->emulator);
- goto endjob;
- }
- if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX)
- internalret = virAsprintf(&migrateFrom, "unix:%s", unixfile);
- else if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC)
- internalret = virAsprintf(&migrateFrom, "exec:nc -U -l %s",
unixfile);
- else {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("Destination qemu is too old to support
tunnelled migration"));
- goto endjob;
- }
- if (internalret < 0) {
- virReportOOMError();
- goto endjob;
- }
- /* Start the QEMU daemon, with the same command-line arguments plus
- * -incoming unix:/path/to/file or exec:nc -U /path/to/file
- */
- internalret = qemuProcessStart(dconn, driver, vm, migrateFrom, true,
- -1, NULL, VIR_VM_OP_MIGRATE_IN_START);
- VIR_FREE(migrateFrom);
- if (internalret < 0) {
- qemuDomainStartAudit(vm, "migrated", false);
- /* Note that we don't set an error here because qemuProcessStart
- * should have already done that.
- */
- if (!vm->persistent) {
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
- }
- goto endjob;
- }
-
- if (virFDStreamConnectUNIX(st,
- unixfile,
- false) < 0) {
- qemuDomainStartAudit(vm, "migrated", false);
- qemuProcessStop(driver, vm, 0);
- if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
- }
- virReportSystemError(errno,
- _("cannot open unix socket '%s' for tunnelled
migration"),
- unixfile);
- goto endjob;
- }
-
- qemuDomainStartAudit(vm, "migrated", true);
-
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_STARTED,
- VIR_DOMAIN_EVENT_STARTED_MIGRATED);
- ret = 0;
-
-endjob:
- if (vm &&
- qemuDomainObjEndJob(vm) == 0)
- vm = NULL;
-
- /* We set a fake job active which is held across
- * API calls until the finish() call. This prevents
- * any other APIs being invoked while incoming
- * migration is taking place
- */
- if (vm &&
- virDomainObjIsActive(vm)) {
- priv->jobActive = QEMU_JOB_MIGRATION_IN;
- priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
- priv->jobStart = timeval_to_ms(now);
- }
+ qemuDriverLock(driver);
+ ret = qemuMigrationPrepareTunnel(driver, dconn, st,
+ dname, dom_xml);
+ qemuDriverUnlock(driver);
cleanup:
- virDomainDefFree(def);
- if (unixfile)
- unlink(unixfile);
- VIR_FREE(unixfile);
- if (vm)
- virDomainObjUnlock(vm);
- if (event)
- qemuDomainEventQueue(driver, event);
- qemuDriverUnlock(driver);
return ret;
}
@@ -5764,25 +5401,8 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
unsigned long resource ATTRIBUTE_UNUSED,
const char *dom_xml)
{
- static int port = 0;
struct qemud_driver *driver = dconn->privateData;
- virDomainDefPtr def = NULL;
- virDomainObjPtr vm = NULL;
- int this_port;
- char *hostname = NULL;
- char migrateFrom [64];
- const char *p;
- virDomainEventPtr event = NULL;
int ret = -1;
- int internalret;
- qemuDomainObjPrivatePtr priv = NULL;
- struct timeval now;
-
- if (gettimeofday(&now, NULL) < 0) {
- virReportSystemError(errno, "%s",
- _("cannot get time of day"));
- return -1;
- }
virCheckFlags(VIR_MIGRATE_LIVE |
VIR_MIGRATE_PEER2PEER |
@@ -5811,790 +5431,58 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
goto cleanup;
}
- /* The URI passed in may be NULL or a string "tcp://somehostname:port".
- *
- * If the URI passed in is NULL then we allocate a port number
- * from our pool of port numbers and return a URI of
- * "tcp://ourhostname:port".
- *
- * If the URI passed in is not NULL then we try to parse out the
- * port number and use that (note that the hostname is assumed
- * to be a correct hostname which refers to the target machine).
- */
- if (uri_in == NULL) {
- this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
- if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;
+ ret = qemuMigrationPrepareDirect(driver, dconn,
+ uri_in, uri_out,
+ dname, dom_xml);
- /* Get hostname */
- if ((hostname = virGetHostname(NULL)) == NULL)
- goto cleanup;
+cleanup:
+ qemuDriverUnlock(driver);
+ return ret;
+}
- if (STRPREFIX(hostname, "localhost")) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("hostname on destination resolved to localhost, but
migration requires an FQDN"));
- goto cleanup;
- }
- /* XXX this really should have been a properly well-formed
- * URI, but we can't add in tcp:// now without breaking
- * compatability with old targets. We at least make the
- * new targets accept both syntaxes though.
- */
- /* Caller frees */
- internalret = virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port);
- if (internalret < 0) {
- virReportOOMError();
- goto cleanup;
- }
- } else {
- /* Check the URI starts with "tcp:". We will escape the
- * URI when passing it to the qemu monitor, so bad
- * characters in hostname part don't matter.
- */
- if (!STRPREFIX (uri_in, "tcp:")) {
- qemuReportError (VIR_ERR_INVALID_ARG,
- "%s", _("only tcp URIs are supported for
KVM/QEMU migrations"));
- goto cleanup;
- }
+/* Perform is the second step, and it runs on the source host. */
+static int
+qemudDomainMigratePerform (virDomainPtr dom,
+ const char *cookie ATTRIBUTE_UNUSED,
+ int cookielen ATTRIBUTE_UNUSED,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ struct qemud_driver *driver = dom->conn->privateData;
+ virDomainObjPtr vm;
+ int ret = -1;
- /* Get the port number. */
- p = strrchr (uri_in, ':');
- if (p == strchr(uri_in, ':')) {
- /* Generate a port */
- this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
- if (port == QEMUD_MIGRATION_NUM_PORTS)
- port = 0;
+ virCheckFlags(VIR_MIGRATE_LIVE |
+ VIR_MIGRATE_PEER2PEER |
+ VIR_MIGRATE_TUNNELLED |
+ VIR_MIGRATE_PERSIST_DEST |
+ VIR_MIGRATE_UNDEFINE_SOURCE |
+ VIR_MIGRATE_PAUSED |
+ VIR_MIGRATE_NON_SHARED_DISK |
+ VIR_MIGRATE_NON_SHARED_INC, -1);
- /* Caller frees */
- if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0) {
- virReportOOMError();
- goto cleanup;
- }
+ qemuDriverLock(driver);
+ vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+ if (!vm) {
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+ virUUIDFormat(dom->uuid, uuidstr);
+ qemuReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s'"),
uuidstr);
+ goto cleanup;
+ }
- } else {
- p++; /* definitely has a ':' in it, see above */
- this_port = virParseNumber (&p);
- if (this_port == -1 || p-uri_in != strlen (uri_in)) {
- qemuReportError(VIR_ERR_INVALID_ARG,
- "%s", _("URI ended with incorrect
':port'"));
- goto cleanup;
- }
- }
- }
-
- if (*uri_out)
- VIR_DEBUG("Generated uri_out=%s", *uri_out);
-
- /* Parse the domain XML. */
- if (!(def = virDomainDefParseString(driver->caps, dom_xml,
- VIR_DOMAIN_XML_INACTIVE))) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("failed to parse XML"));
- goto cleanup;
- }
-
- if (!qemuDomainIsMigratable(def))
- goto cleanup;
-
- /* Target domain name, maybe renamed. */
- if (dname) {
- VIR_FREE(def->name);
- def->name = strdup(dname);
- if (def->name == NULL)
- goto cleanup;
- }
-
- if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
- goto cleanup;
-
- if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains,
- def, true))) {
- /* virDomainAssignDef already set the error */
- goto cleanup;
- }
- def = NULL;
- priv = vm->privateData;
-
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
- goto cleanup;
- priv->jobActive = QEMU_JOB_MIGRATION_OUT;
-
- /* Domain starts inactive, even if the domain XML had an id field. */
- vm->def->id = -1;
-
- /* Start the QEMU daemon, with the same command-line arguments plus
- * -incoming tcp:0.0.0.0:port
- */
- snprintf (migrateFrom, sizeof (migrateFrom), "tcp:0.0.0.0:%d", this_port);
- if (qemuProcessStart(dconn, driver, vm, migrateFrom, true,
- -1, NULL, VIR_VM_OP_MIGRATE_IN_START) < 0) {
- qemuDomainStartAudit(vm, "migrated", false);
- /* Note that we don't set an error here because qemuProcessStart
- * should have already done that.
- */
- if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
- }
- goto endjob;
- }
-
- qemuDomainStartAudit(vm, "migrated", true);
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_STARTED,
- VIR_DOMAIN_EVENT_STARTED_MIGRATED);
- ret = 0;
-
-endjob:
- if (vm &&
- qemuDomainObjEndJob(vm) == 0)
- vm = NULL;
-
- /* We set a fake job active which is held across
- * API calls until the finish() call. This prevents
- * any other APIs being invoked while incoming
- * migration is taking place
- */
- if (vm &&
- virDomainObjIsActive(vm)) {
- priv->jobActive = QEMU_JOB_MIGRATION_IN;
- priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
- priv->jobStart = timeval_to_ms(now);
- }
-
-cleanup:
- VIR_FREE(hostname);
- virDomainDefFree(def);
- if (ret != 0)
- VIR_FREE(*uri_out);
- if (vm)
- virDomainObjUnlock(vm);
- if (event)
- qemuDomainEventQueue(driver, event);
- qemuDriverUnlock(driver);
- return ret;
-
-}
-
-
-/* Perform migration using QEMU's native TCP migrate support,
- * not encrypted obviously
- */
-static int doNativeMigrate(struct qemud_driver *driver,
- virDomainObjPtr vm,
- const char *uri,
- unsigned int flags,
- const char *dname ATTRIBUTE_UNUSED,
- unsigned long resource)
-{
- int ret = -1;
- xmlURIPtr uribits = NULL;
- qemuDomainObjPrivatePtr priv = vm->privateData;
- unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
-
- /* Issue the migrate command. */
- if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://"))
{
- /* HACK: source host generates bogus URIs, so fix them up */
- char *tmpuri;
- if (virAsprintf(&tmpuri, "tcp://%s", uri + strlen("tcp:")) < 0) {
- virReportOOMError();
- goto cleanup;
- }
- uribits = xmlParseURI(tmpuri);
- VIR_FREE(tmpuri);
- } else {
- uribits = xmlParseURI(uri);
- }
- if (!uribits) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR,
- _("cannot parse URI %s"), uri);
- goto cleanup;
- }
-
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (resource > 0 &&
- qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- goto cleanup;
- }
-
- if (flags & VIR_MIGRATE_NON_SHARED_DISK)
- background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
-
- if (flags & VIR_MIGRATE_NON_SHARED_INC)
- background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
-
- if (qemuMonitorMigrateToHost(priv->mon, background_flags, uribits->server,
- uribits->port) < 0) {
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- goto cleanup;
- }
- qemuDomainObjExitMonitorWithDriver(driver, vm);
-
- if (qemuDomainWaitForMigrationComplete(driver, vm) < 0)
- goto cleanup;
-
- ret = 0;
-
-cleanup:
- xmlFreeURI(uribits);
- return ret;
-}
-
-
-#define TUNNEL_SEND_BUF_SIZE 65536
-
-static int doTunnelSendAll(virStreamPtr st,
- int sock)
-{
- char *buffer;
- int nbytes = TUNNEL_SEND_BUF_SIZE;
-
- if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0) {
- virReportOOMError();
- virStreamAbort(st);
- return -1;
- }
-
- /* XXX should honour the 'resource' parameter here */
- for (;;) {
- nbytes = saferead(sock, buffer, nbytes);
- if (nbytes < 0) {
- virReportSystemError(errno, "%s",
- _("tunnelled migration failed to read from
qemu"));
- virStreamAbort(st);
- VIR_FREE(buffer);
- return -1;
- }
- else if (nbytes == 0)
- /* EOF; get out of here */
- break;
-
- if (virStreamSend(st, buffer, nbytes) < 0) {
- qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
- _("Failed to write migration data to remote
libvirtd"));
- VIR_FREE(buffer);
- return -1;
- }
- }
-
- VIR_FREE(buffer);
-
- if (virStreamFinish(st) < 0)
- /* virStreamFinish set the error for us */
- return -1;
-
- return 0;
-}
-
-static int doTunnelMigrate(virDomainPtr dom,
- struct qemud_driver *driver,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *dom_xml,
- const char *uri,
- unsigned long flags,
- const char *dname,
- unsigned long resource)
-{
- qemuDomainObjPrivatePtr priv = vm->privateData;
- int client_sock = -1;
- int qemu_sock = -1;
- struct sockaddr_un sa_qemu, sa_client;
- socklen_t addrlen;
- virDomainPtr ddomain = NULL;
- int retval = -1;
- virStreamPtr st = NULL;
- char *unixfile = NULL;
- int internalret;
- unsigned long long qemuCmdFlags;
- int status;
- unsigned long long transferred, remaining, total;
- unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
-
- /*
- * The order of operations is important here to avoid touching
- * the source VM until we are very sure we can successfully
- * start the migration operation.
- *
- * 1. setup local support infrastructure (eg sockets)
- * 2. setup destination fully
- * 3. start migration on source
- */
-
-
- /* Stage 1. setup local support infrastructure */
-
- if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
- driver->libDir, vm->def->name) < 0) {
- virReportOOMError();
- goto cleanup;
- }
-
- qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
- if (qemu_sock < 0) {
- virReportSystemError(errno, "%s",
- _("cannot open tunnelled migration socket"));
- goto cleanup;
- }
- memset(&sa_qemu, 0, sizeof(sa_qemu));
- sa_qemu.sun_family = AF_UNIX;
- if (virStrcpy(sa_qemu.sun_path, unixfile,
- sizeof(sa_qemu.sun_path)) == NULL) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR,
- _("Unix socket '%s' too big for destination"),
- unixfile);
- goto cleanup;
- }
- unlink(unixfile);
- if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
- virReportSystemError(errno,
- _("Cannot bind to unix socket '%s' for
tunnelled migration"),
- unixfile);
- goto cleanup;
- }
- if (listen(qemu_sock, 1) < 0) {
- virReportSystemError(errno,
- _("Cannot listen on unix socket '%s' for
tunnelled migration"),
- unixfile);
- goto cleanup;
- }
-
- if (chown(unixfile, qemu_driver->user, qemu_driver->group) < 0) {
- virReportSystemError(errno,
- _("Cannot change unix socket '%s'
owner"),
- unixfile);
- goto cleanup;
- }
-
- /* check that this qemu version supports the unix migration */
- if (qemuCapsExtractVersionInfo(vm->def->emulator, NULL, &qemuCmdFlags) < 0) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR,
- _("Cannot extract Qemu version from '%s'"),
- vm->def->emulator);
- goto cleanup;
- }
-
- if (!(qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX) &&
- !(qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC)) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("Source qemu is too old to support
tunnelled migration"));
- goto cleanup;
- }
-
-
- /* Stage 2. setup destination fully
- *
- * Once stage 2 has completed successfully, we *must* call finish
- * to cleanup the target whether we succeed or fail
- */
- st = virStreamNew(dconn, 0);
- if (st == NULL)
- /* virStreamNew only fails on OOM, and it reports the error itself */
- goto cleanup;
-
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st,
- flags, dname,
- resource, dom_xml);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
-
- if (internalret < 0)
- /* domainMigratePrepareTunnel sets the error for us */
- goto cleanup;
-
- /* the domain may have shutdown or crashed while we had the locks dropped
- * in qemuDomainObjEnterRemoteWithDriver, so check again
- */
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- /* 3. start migration on source */
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (flags & VIR_MIGRATE_NON_SHARED_DISK)
- background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
- if (flags & VIR_MIGRATE_NON_SHARED_INC)
- background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
- if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX){
- internalret = qemuMonitorMigrateToUnix(priv->mon, background_flags,
- unixfile);
- }
- else if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC) {
- const char *args[] = { "nc", "-U", unixfile, NULL };
- internalret = qemuMonitorMigrateToCommand(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND, args);
- } else {
- internalret = -1;
- }
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- if (internalret < 0) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("tunnelled migration monitor command
failed"));
- goto finish;
- }
-
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- /* From this point onwards we *must* call cancel to abort the
- * migration on source if anything goes wrong */
-
- /* it is also possible that the migrate didn't fail initially, but
- * rather failed later on. Check the output of "info migrate"
- */
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- if (qemuMonitorGetMigrationStatus(priv->mon,
- &status,
- &transferred,
- &remaining,
- &total) < 0) {
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- goto cancel;
- }
- qemuDomainObjExitMonitorWithDriver(driver, vm);
-
- if (status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s",_("migrate failed"));
- goto cancel;
- }
-
- addrlen = sizeof(sa_client);
- while ((client_sock = accept(qemu_sock, (struct sockaddr *)&sa_client, &addrlen)) < 0) {
- if (errno == EAGAIN || errno == EINTR)
- continue;
- virReportSystemError(errno, "%s",
- _("tunnelled migration failed to accept from
qemu"));
- goto cancel;
- }
-
- retval = doTunnelSendAll(st, client_sock);
-
-cancel:
- if (retval != 0 && virDomainObjIsActive(vm)) {
- qemuDomainObjEnterMonitorWithDriver(driver, vm);
- qemuMonitorMigrateCancel(priv->mon);
- qemuDomainObjExitMonitorWithDriver(driver, vm);
- }
-
-finish:
- dname = dname ? dname : dom->name;
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- ddomain = dconn->driver->domainMigrateFinish2
- (dconn, dname, NULL, 0, uri, flags, retval);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
-
-cleanup:
- VIR_FORCE_CLOSE(client_sock);
- VIR_FORCE_CLOSE(qemu_sock);
-
- if (ddomain)
- virUnrefDomain(ddomain);
-
- if (unixfile) {
- unlink(unixfile);
- VIR_FREE(unixfile);
- }
-
- if (st)
- /* don't call virStreamFree(), because that resets any pending errors */
- virUnrefStream(st);
- return retval;
-}
-
-
-/* This is essentially a simplified re-impl of
- * virDomainMigrateVersion2 from libvirt.c, but running in source
- * libvirtd context, instead of client app context */
-static int doNonTunnelMigrate(virDomainPtr dom,
- struct qemud_driver *driver,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *dom_xml,
- const char *uri ATTRIBUTE_UNUSED,
- unsigned long flags,
- const char *dname,
- unsigned long resource)
-{
- virDomainPtr ddomain = NULL;
- int retval = -1;
- char *uri_out = NULL;
- int rc;
-
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- /* NB we don't pass 'uri' into this, since that's the libvirtd
- * URI in this context - so we let dest pick it */
- rc = dconn->driver->domainMigratePrepare2(dconn,
- NULL, /* cookie */
- 0, /* cookielen */
- NULL, /* uri */
- &uri_out,
- flags, dname,
- resource, dom_xml);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
- if (rc < 0)
- /* domainMigratePrepare2 sets the error for us */
- goto cleanup;
-
- /* the domain may have shutdown or crashed while we had the locks dropped
- * in qemuDomainObjEnterRemoteWithDriver, so check again
- */
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- if (uri_out == NULL) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("domainMigratePrepare2 did not set uri"));
- goto cleanup;
- }
-
- if (doNativeMigrate(driver, vm, uri_out, flags, dname, resource) < 0)
- goto finish;
-
- retval = 0;
-
-finish:
- dname = dname ? dname : dom->name;
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- ddomain = dconn->driver->domainMigrateFinish2
- (dconn, dname, NULL, 0, uri_out, flags, retval);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
-
- if (ddomain)
- virUnrefDomain(ddomain);
-
-cleanup:
- return retval;
-}
-
-
-static int doPeer2PeerMigrate(virDomainPtr dom,
- struct qemud_driver *driver,
- virDomainObjPtr vm,
- const char *uri,
- unsigned long flags,
- const char *dname,
- unsigned long resource)
-{
- int ret = -1;
- virConnectPtr dconn = NULL;
- char *dom_xml;
- bool p2p;
-
- /* the order of operations is important here; we make sure the
- * destination side is completely setup before we touch the source
- */
-
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- dconn = virConnectOpen(uri);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
- if (dconn == NULL) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- _("Failed to connect to remote libvirt URI %s"), uri);
- return -1;
- }
-
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
- VIR_DRV_FEATURE_MIGRATION_P2P);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
- if (!p2p) {
- qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
- _("Destination libvirt does not support peer-to-peer
migration protocol"));
- goto cleanup;
- }
-
- /* domain may have been stopped while we were talking to remote daemon */
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- dom_xml = qemudVMDumpXML(driver, vm,
- VIR_DOMAIN_XML_SECURE |
- VIR_DOMAIN_XML_UPDATE_CPU);
- if (!dom_xml) {
- qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("failed to get domain xml"));
- goto cleanup;
- }
-
- if (flags & VIR_MIGRATE_TUNNELLED)
- ret = doTunnelMigrate(dom, driver, dconn, vm, dom_xml, uri, flags, dname, resource);
- else
- ret = doNonTunnelMigrate(dom, driver, dconn, vm, dom_xml, uri, flags, dname, resource);
-
-cleanup:
- VIR_FREE(dom_xml);
- /* don't call virConnectClose(), because that resets any pending errors */
- qemuDomainObjEnterRemoteWithDriver(driver, vm);
- virUnrefConnect(dconn);
- qemuDomainObjExitRemoteWithDriver(driver, vm);
-
- return ret;
-}
-
-
-/* Perform is the second step, and it runs on the source host. */
-static int
-qemudDomainMigratePerform (virDomainPtr dom,
- const char *cookie ATTRIBUTE_UNUSED,
- int cookielen ATTRIBUTE_UNUSED,
- const char *uri,
- unsigned long flags,
- const char *dname,
- unsigned long resource)
-{
- struct qemud_driver *driver = dom->conn->privateData;
- virDomainObjPtr vm;
- virDomainEventPtr event = NULL;
- int ret = -1;
- int resume = 0;
- qemuDomainObjPrivatePtr priv;
-
- virCheckFlags(VIR_MIGRATE_LIVE |
- VIR_MIGRATE_PEER2PEER |
- VIR_MIGRATE_TUNNELLED |
- VIR_MIGRATE_PERSIST_DEST |
- VIR_MIGRATE_UNDEFINE_SOURCE |
- VIR_MIGRATE_PAUSED |
- VIR_MIGRATE_NON_SHARED_DISK |
- VIR_MIGRATE_NON_SHARED_INC, -1);
-
- qemuDriverLock(driver);
- vm = virDomainFindByUUID(&driver->domains, dom->uuid);
- if (!vm) {
- char uuidstr[VIR_UUID_STRING_BUFLEN];
- virUUIDFormat(dom->uuid, uuidstr);
- qemuReportError(VIR_ERR_NO_DOMAIN,
- _("no domain with matching uuid '%s'"),
uuidstr);
- goto cleanup;
- }
- priv = vm->privateData;
-
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
- goto cleanup;
- priv->jobActive = QEMU_JOB_MIGRATION_OUT;
-
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("domain is not running"));
- goto endjob;
- }
-
- memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
- priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
-
- resume = vm->state == VIR_DOMAIN_RUNNING;
- if (!(flags & VIR_MIGRATE_LIVE) && vm->state == VIR_DOMAIN_RUNNING) {
- if (qemuDomainMigrateOffline(driver, vm) < 0)
- goto endjob;
- }
-
- if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
- if (doPeer2PeerMigrate(dom, driver, vm, uri, flags, dname, resource) < 0)
- /* doPeer2PeerMigrate already set the error, so just get out */
- goto endjob;
- } else {
- if (doNativeMigrate(driver, vm, uri, flags, dname, resource) < 0)
- goto endjob;
- }
-
- /* Clean up the source domain. */
- qemuProcessStop(driver, vm, 1);
- qemuDomainStopAudit(vm, "migrated");
- resume = 0;
-
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_STOPPED,
- VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
- if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) {
- virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
- if (qemuDomainObjEndJob(vm) > 0)
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
- }
- ret = 0;
-
-endjob:
- if (resume && vm->state == VIR_DOMAIN_PAUSED) {
- /* we got here through some sort of failure; start the domain again */
- if (qemuProcessStartCPUs(driver, vm, dom->conn) < 0) {
- /* Hm, we already know we are in error here. We don't want to
- * overwrite the previous error, though, so we just throw something
- * to the logs and hope for the best
- */
- VIR_ERROR(_("Failed to resume guest %s after failure"),
- vm->def->name);
- }
-
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_RESUMED,
- VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
- }
- if (vm &&
- qemuDomainObjEndJob(vm) == 0)
- vm = NULL;
+ ret = qemuMigrationPerform(driver, dom->conn, vm,
+ uri, flags,
+ dname, resource);
cleanup:
- if (vm)
- virDomainObjUnlock(vm);
- if (event)
- qemuDomainEventQueue(driver, event);
qemuDriverUnlock(driver);
return ret;
}
-#if WITH_MACVTAP
-static void
-qemudVPAssociatePortProfiles(virDomainDefPtr def) {
- int i;
- int last_good_net = -1;
- virDomainNetDefPtr net;
-
- for (i = 0; i < def->nnets; i++) {
- net = def->nets[i];
- if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
- if (vpAssociatePortProfileId(net->ifname,
- net->mac,
- net->data.direct.linkdev,
- &net->data.direct.virtPortProfile,
- def->uuid,
- VIR_VM_OP_MIGRATE_IN_FINISH) != 0)
- goto err_exit;
- }
- last_good_net = i;
- }
-
- return;
-
-err_exit:
- for (i = 0; i < last_good_net; i++) {
- net = def->nets[i];
- if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
- vpDisassociatePortProfileId(net->ifname,
- net->mac,
- net->data.direct.linkdev,
- &net->data.direct.virtPortProfile,
- VIR_VM_OP_MIGRATE_IN_FINISH);
- }
- }
-}
-#else /* !WITH_MACVTAP */
-static void
-qemudVPAssociatePortProfiles(virDomainDefPtr def ATTRIBUTE_UNUSED) { }
-#endif /* WITH_MACVTAP */
/* Finish is the third and final step, and it runs on the destination host. */
static virDomainPtr
@@ -6609,10 +5497,7 @@ qemudDomainMigrateFinish2 (virConnectPtr dconn,
struct qemud_driver *driver = dconn->privateData;
virDomainObjPtr vm;
virDomainPtr dom = NULL;
- virDomainEventPtr event = NULL;
virErrorPtr orig_err;
- int newVM = 1;
- qemuDomainObjPrivatePtr priv = NULL;
virCheckFlags(VIR_MIGRATE_LIVE |
VIR_MIGRATE_PEER2PEER |
@@ -6634,118 +5519,18 @@ qemudDomainMigrateFinish2 (virConnectPtr dconn,
goto cleanup;
}
- priv = vm->privateData;
- if (priv->jobActive != QEMU_JOB_MIGRATION_IN) {
- qemuReportError(VIR_ERR_NO_DOMAIN,
- _("domain '%s' is not processing incoming
migration"), dname);
- goto cleanup;
- }
- priv->jobActive = QEMU_JOB_NONE;
- memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
-
- if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
- goto cleanup;
-
- /* Did the migration go as planned? If yes, return the domain
- * object, but if no, clean up the empty qemu process.
- */
- if (retcode == 0) {
- if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit"));
- goto cleanup;
- }
-
- qemudVPAssociatePortProfiles(vm->def);
-
- if (flags & VIR_MIGRATE_PERSIST_DEST) {
- if (vm->persistent)
- newVM = 0;
- vm->persistent = 1;
-
- if (virDomainSaveConfig(driver->configDir, vm->def) < 0) {
- /* Hmpf. Migration was successful, but making it persistent
- * was not. If we report successful, then when this domain
- * shuts down, management tools are in for a surprise. On the
- * other hand, if we report failure, then the management tools
- * might try to restart the domain on the source side, even
- * though the domain is actually running on the destination.
- * Return a NULL dom pointer, and hope that this is a rare
- * situation and management tools are smart.
- */
- vm = NULL;
- goto endjob;
- }
-
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_DEFINED,
- newVM ?
- VIR_DOMAIN_EVENT_DEFINED_ADDED :
- VIR_DOMAIN_EVENT_DEFINED_UPDATED);
- if (event)
- qemuDomainEventQueue(driver, event);
- event = NULL;
-
- }
- dom = virGetDomain (dconn, vm->def->name, vm->def->uuid);
-
- if (!(flags & VIR_MIGRATE_PAUSED)) {
- /* run 'cont' on the destination, which allows migration on qemu
- * >= 0.10.6 to work properly. This isn't strictly necessary on
- * older qemu's, but it also doesn't hurt anything there
- */
- if (qemuProcessStartCPUs(driver, vm, dconn) < 0) {
- if (virGetLastError() == NULL)
- qemuReportError(VIR_ERR_INTERNAL_ERROR,
- "%s", _("resume operation
failed"));
- goto endjob;
- }
- }
-
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_RESUMED,
- VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
- if (vm->state == VIR_DOMAIN_PAUSED) {
- qemuDomainEventQueue(driver, event);
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_SUSPENDED,
- VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
- }
- if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
- VIR_WARN("Failed to save status on vm %s", vm->def->name);
- goto endjob;
- }
- } else {
- qemuProcessStop(driver, vm, 1);
- qemuDomainStopAudit(vm, "failed");
- event = virDomainEventNewFromObj(vm,
- VIR_DOMAIN_EVENT_STOPPED,
- VIR_DOMAIN_EVENT_STOPPED_FAILED);
- if (!vm->persistent) {
- if (qemuDomainObjEndJob(vm) > 0)
- virDomainRemoveInactive(&driver->domains, vm);
- vm = NULL;
- }
- }
-
-endjob:
- if (vm &&
- qemuDomainObjEndJob(vm) == 0)
- vm = NULL;
+ dom = qemuMigrationFinish(driver, dconn, vm, flags, retcode);
cleanup:
if (orig_err) {
virSetError(orig_err);
virFreeError(orig_err);
}
- if (vm)
- virDomainObjUnlock(vm);
- if (event)
- qemuDomainEventQueue(driver, event);
qemuDriverUnlock(driver);
return dom;
}
+
static int
qemudNodeDeviceGetPciInfo (virNodeDevicePtr dev,
unsigned *domain,
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
new file mode 100644
index 0000000..8d23cc5
--- /dev/null
+++ b/src/qemu/qemu_migration.c
@@ -0,0 +1,1295 @@
+/*
+ * qemu_migration.c: QEMU migration handling
+ *
+ * Copyright (C) 2006-2011 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <config.h>
+
+#include <sys/time.h>
+
+#include "qemu_migration.h"
+#include "qemu_monitor.h"
+#include "qemu_domain.h"
+#include "qemu_process.h"
+#include "qemu_capabilities.h"
+#include "qemu_audit.h"
+
+#include "logging.h"
+#include "virterror_internal.h"
+#include "memory.h"
+#include "util.h"
+#include "files.h"
+#include "datatypes.h"
+#include "fdstream.h"
+
+#define VIR_FROM_THIS VIR_FROM_QEMU
+
+#define timeval_to_ms(tv) (((tv).tv_sec * 1000ull) + ((tv).tv_usec / 1000))
+
+
+bool
+qemuMigrationIsAllowed(virDomainDefPtr def)
+{
+ if (def->nhostdevs > 0) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("Domain with assigned host devices cannot be
migrated"));
+ return false;
+ }
+
+ return true;
+}
+
+/** qemuMigrationSetOffline
+ * Pause domain for non-live migration.
+ */
+int
+qemuMigrationSetOffline(struct qemud_driver *driver,
+ virDomainObjPtr vm)
+{
+ int ret;
+
+ ret = qemuProcessStopCPUs(driver, vm);
+ if (ret == 0) {
+ virDomainEventPtr event;
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_SUSPENDED,
+ VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ }
+
+ return ret;
+}
+
+
+int
+qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
+{
+ int ret = -1;
+ int status;
+ unsigned long long memProcessed;
+ unsigned long long memRemaining;
+ unsigned long long memTotal;
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
+
+ while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
+ /* Poll every 50ms for progress & to allow cancellation */
+ struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
+ struct timeval now;
+ int rc;
+ const char *job;
+
+ switch (priv->jobActive) {
+ case QEMU_JOB_MIGRATION_OUT:
+ job = _("migration job");
+ break;
+ case QEMU_JOB_SAVE:
+ job = _("domain save job");
+ break;
+ case QEMU_JOB_DUMP:
+ job = _("domain core dump job");
+ break;
+ default:
+ job = _("job");
+ }
+
+
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
+ job, _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
+ priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
+ VIR_DEBUG0("Cancelling job at client request");
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ rc = qemuMonitorMigrateCancel(priv->mon);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ if (rc < 0) {
+ VIR_WARN0("Unable to cancel job");
+ }
+ } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
+ priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
+ VIR_DEBUG0("Pausing domain for non-live migration");
+ if (qemuMigrationSetOffline(driver, vm) < 0)
+ VIR_WARN0("Unable to pause domain");
+ } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
+ unsigned long long ms = priv->jobSignalsData.migrateDowntime;
+
+ priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
+ priv->jobSignalsData.migrateDowntime = 0;
+ VIR_DEBUG("Setting migration downtime to %llums", ms);
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ rc = qemuMonitorSetMigrationDowntime(priv->mon, ms);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ if (rc < 0)
+ VIR_WARN0("Unable to set migration downtime");
+ }
+
+ /* Repeat check because the job signals might have caused
+ * guest to die
+ */
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
+ job, _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ rc = qemuMonitorGetMigrationStatus(priv->mon,
+ &status,
+ &memProcessed,
+ &memRemaining,
+ &memTotal);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+ if (rc < 0) {
+ priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
+ goto cleanup;
+ }
+
+ if (gettimeofday(&now, NULL) < 0) {
+ priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
+ virReportSystemError(errno, "%s",
+ _("cannot get time of day"));
+ goto cleanup;
+ }
+ priv->jobInfo.timeElapsed = timeval_to_ms(now) - priv->jobStart;
+
+ switch (status) {
+ case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
+ priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ _("%s: %s"), job, _("is not active"));
+ break;
+
+ case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
+ priv->jobInfo.dataTotal = memTotal;
+ priv->jobInfo.dataRemaining = memRemaining;
+ priv->jobInfo.dataProcessed = memProcessed;
+
+ priv->jobInfo.memTotal = memTotal;
+ priv->jobInfo.memRemaining = memRemaining;
+ priv->jobInfo.memProcessed = memProcessed;
+ break;
+
+ case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
+ priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
+ ret = 0;
+ break;
+
+ case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
+ priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ _("%s: %s"), job, _("unexpectedly
failed"));
+ break;
+
+ case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
+ priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ _("%s: %s"), job, _("canceled by
client"));
+ break;
+ }
+
+ virDomainObjUnlock(vm);
+ qemuDriverUnlock(driver);
+
+ nanosleep(&ts, NULL);
+
+ qemuDriverLock(driver);
+ virDomainObjLock(vm);
+ }
+
+cleanup:
+ return ret;
+}
+
+
+/* Prepare is the first step, and it runs on the destination host.
+ *
+ * This version starts an empty VM listening on a localhost TCP port, and
+ * sets up the corresponding virStream to handle the incoming data.
+ */
+int
+qemuMigrationPrepareTunnel(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ virStreamPtr st,
+ const char *dname,
+ const char *dom_xml)
+{
+ virDomainDefPtr def = NULL;
+ virDomainObjPtr vm = NULL;
+ char *migrateFrom;
+ virDomainEventPtr event = NULL;
+ int ret = -1;
+ int internalret;
+ char *unixfile = NULL;
+ unsigned long long qemuCmdFlags;
+ qemuDomainObjPrivatePtr priv = NULL;
+ struct timeval now;
+
+ if (gettimeofday(&now, NULL) < 0) {
+ virReportSystemError(errno, "%s",
+ _("cannot get time of day"));
+ return -1;
+ }
+
+ /* Parse the domain XML. */
+ if (!(def = virDomainDefParseString(driver->caps, dom_xml,
+ VIR_DOMAIN_XML_INACTIVE))) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to parse XML"));
+ goto cleanup;
+ }
+
+ if (!qemuMigrationIsAllowed(def))
+ goto cleanup;
+
+ /* Target domain name, maybe renamed. */
+ if (dname) {
+ VIR_FREE(def->name);
+ def->name = strdup(dname);
+ if (def->name == NULL)
+ goto cleanup;
+ }
+
+ if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
+ goto cleanup;
+
+ if (!(vm = virDomainAssignDef(driver->caps,
+ &driver->domains,
+ def, true))) {
+ /* virDomainAssignDef already set the error */
+ goto cleanup;
+ }
+ def = NULL;
+ priv = vm->privateData;
+
+ if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ goto cleanup;
+ priv->jobActive = QEMU_JOB_MIGRATION_OUT;
+
+ /* Domain starts inactive, even if the domain XML had an id field. */
+ vm->def->id = -1;
+
+ if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.dest.%s",
+ driver->libDir, vm->def->name) < 0) {
+ virReportOOMError();
+ goto endjob;
+ }
+ unlink(unixfile);
+
+ /* check that this qemu version supports the interactive exec */
+ if (qemuCapsExtractVersionInfo(vm->def->emulator, NULL, &qemuCmdFlags) < 0) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Cannot determine QEMU argv syntax %s"),
+ vm->def->emulator);
+ goto endjob;
+ }
+ if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX)
+ internalret = virAsprintf(&migrateFrom, "unix:%s", unixfile);
+ else if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC)
+ internalret = virAsprintf(&migrateFrom, "exec:nc -U -l %s",
unixfile);
+ else {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("Destination qemu is too old to support
tunnelled migration"));
+ goto endjob;
+ }
+ if (internalret < 0) {
+ virReportOOMError();
+ goto endjob;
+ }
+ /* Start the QEMU daemon, with the same command-line arguments plus
+ * -incoming unix:/path/to/file or exec:nc -U /path/to/file
+ */
+ internalret = qemuProcessStart(dconn, driver, vm, migrateFrom, true,
+ -1, NULL, VIR_VM_OP_MIGRATE_IN_START);
+ VIR_FREE(migrateFrom);
+ if (internalret < 0) {
+ qemuDomainStartAudit(vm, "migrated", false);
+ /* Note that we don't set an error here because qemuProcessStart
+ * should have already done that.
+ */
+ if (!vm->persistent) {
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+ goto endjob;
+ }
+
+ if (virFDStreamConnectUNIX(st,
+ unixfile,
+ false) < 0) {
+ qemuDomainStartAudit(vm, "migrated", false);
+ qemuProcessStop(driver, vm, 0);
+ if (!vm->persistent) {
+ if (qemuDomainObjEndJob(vm) > 0)
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+ virReportSystemError(errno,
+ _("cannot open unix socket '%s' for tunnelled
migration"),
+ unixfile);
+ goto endjob;
+ }
+
+ qemuDomainStartAudit(vm, "migrated", true);
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STARTED,
+ VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+ ret = 0;
+
+endjob:
+ if (vm &&
+ qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+
+ /* We set a fake job active which is held across
+ * API calls until the finish() call. This prevents
+ * any other APIs being invoked while incoming
+ * migration is taking place
+ */
+ if (vm &&
+ virDomainObjIsActive(vm)) {
+ priv->jobActive = QEMU_JOB_MIGRATION_IN;
+ priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
+ priv->jobStart = timeval_to_ms(now);
+ }
+
+cleanup:
+ virDomainDefFree(def);
+ if (unixfile)
+ unlink(unixfile);
+ VIR_FREE(unixfile);
+ if (vm)
+ virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ return ret;
+}
+
+
+int
+qemuMigrationPrepareDirect(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ const char *uri_in,
+ char **uri_out,
+ const char *dname,
+ const char *dom_xml)
+{
+ static int port = 0;
+ virDomainDefPtr def = NULL;
+ virDomainObjPtr vm = NULL;
+ int this_port;
+ char *hostname = NULL;
+ char migrateFrom [64];
+ const char *p;
+ virDomainEventPtr event = NULL;
+ int ret = -1;
+ int internalret;
+ qemuDomainObjPrivatePtr priv = NULL;
+ struct timeval now;
+
+ if (gettimeofday(&now, NULL) < 0) {
+ virReportSystemError(errno, "%s",
+ _("cannot get time of day"));
+ return -1;
+ }
+
+ /* The URI passed in may be NULL or a string "tcp://somehostname:port".
+ *
+ * If the URI passed in is NULL then we allocate a port number
+ * from our pool of port numbers and return a URI of
+ * "tcp://ourhostname:port".
+ *
+ * If the URI passed in is not NULL then we try to parse out the
+ * port number and use that (note that the hostname is assumed
+ * to be a correct hostname which refers to the target machine).
+ */
+ if (uri_in == NULL) {
+ this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
+ if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;
+
+ /* Get hostname */
+ if ((hostname = virGetHostname(NULL)) == NULL)
+ goto cleanup;
+
+ if (STRPREFIX(hostname, "localhost")) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("hostname on destination resolved to localhost, but
migration requires an FQDN"));
+ goto cleanup;
+ }
+
+ /* XXX this really should have been a properly well-formed
+ * URI, but we can't add in tcp:// now without breaking
+ * compatibility with old targets. We at least make the
+ * new targets accept both syntaxes though.
+ */
+ /* Caller frees */
+ internalret = virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port);
+ if (internalret < 0) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ } else {
+ /* Check the URI starts with "tcp:". We will escape the
+ * URI when passing it to the qemu monitor, so bad
+ * characters in hostname part don't matter.
+ */
+ if (!STRPREFIX (uri_in, "tcp:")) {
+ qemuReportError (VIR_ERR_INVALID_ARG,
+ "%s", _("only tcp URIs are supported for
KVM/QEMU migrations"));
+ goto cleanup;
+ }
+
+ /* Get the port number. */
+ p = strrchr (uri_in, ':');
+ if (p == strchr(uri_in, ':')) {
+ /* Generate a port */
+ this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
+ if (port == QEMUD_MIGRATION_NUM_PORTS)
+ port = 0;
+
+ /* Caller frees */
+ if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0) {
+ virReportOOMError();
+ goto cleanup;
+ }
+
+ } else {
+ p++; /* definitely has a ':' in it, see above */
+ this_port = virParseNumber (&p);
+ if (this_port == -1 || p-uri_in != strlen (uri_in)) {
+ qemuReportError(VIR_ERR_INVALID_ARG,
+ "%s", _("URI ended with incorrect
':port'"));
+ goto cleanup;
+ }
+ }
+ }
+
+ if (*uri_out)
+ VIR_DEBUG("Generated uri_out=%s", *uri_out);
+
+ /* Parse the domain XML. */
+ if (!(def = virDomainDefParseString(driver->caps, dom_xml,
+ VIR_DOMAIN_XML_INACTIVE))) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to parse XML"));
+ goto cleanup;
+ }
+
+ if (!qemuMigrationIsAllowed(def))
+ goto cleanup;
+
+ /* Target domain name, maybe renamed. */
+ if (dname) {
+ VIR_FREE(def->name);
+ def->name = strdup(dname);
+ if (def->name == NULL)
+ goto cleanup;
+ }
+
+ if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
+ goto cleanup;
+
+ if (!(vm = virDomainAssignDef(driver->caps,
+ &driver->domains,
+ def, true))) {
+ /* virDomainAssignDef already set the error */
+ goto cleanup;
+ }
+ def = NULL;
+ priv = vm->privateData;
+
+ if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ goto cleanup;
+ priv->jobActive = QEMU_JOB_MIGRATION_OUT;
+
+ /* Domain starts inactive, even if the domain XML had an id field. */
+ vm->def->id = -1;
+
+ /* Start the QEMU daemon, with the same command-line arguments plus
+ * -incoming tcp:0.0.0.0:port
+ */
+ snprintf (migrateFrom, sizeof (migrateFrom), "tcp:0.0.0.0:%d", this_port);
+ if (qemuProcessStart(dconn, driver, vm, migrateFrom, true,
+ -1, NULL, VIR_VM_OP_MIGRATE_IN_START) < 0) {
+ qemuDomainStartAudit(vm, "migrated", false);
+ /* Note that we don't set an error here because qemuProcessStart
+ * should have already done that.
+ */
+ if (!vm->persistent) {
+ if (qemuDomainObjEndJob(vm) > 0)
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+ goto endjob;
+ }
+
+ qemuDomainStartAudit(vm, "migrated", true);
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STARTED,
+ VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+ ret = 0;
+
+endjob:
+ if (vm &&
+ qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+
+ /* We set a fake job active which is held across
+ * API calls until the finish() call. This prevents
+ * any other APIs being invoked while incoming
+ * migration is taking place
+ */
+ if (vm &&
+ virDomainObjIsActive(vm)) {
+ priv->jobActive = QEMU_JOB_MIGRATION_IN;
+ priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
+ priv->jobStart = timeval_to_ms(now);
+ }
+
+cleanup:
+ VIR_FREE(hostname);
+ virDomainDefFree(def);
+ if (ret != 0)
+ VIR_FREE(*uri_out);
+ if (vm)
+ virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ return ret;
+}
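
The prepare step above mixes two policies: with no incoming URI it allocates from a fixed round-robin port pool, otherwise it trusts an explicit ":port" suffix. A minimal standalone sketch of both decisions, assuming illustrative values standing in for the QEMUD_MIGRATION_FIRST_PORT/QEMUD_MIGRATION_NUM_PORTS constants (the real definitions live elsewhere in the qemu driver):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum { FIRST_PORT = 49152, NUM_PORTS = 64 };   /* illustrative values */

    /* Same wrap-around counter as the 'static int port' above. */
    static int next_port(void)
    {
        static int offset;
        int this_port = FIRST_PORT + offset++;
        if (offset == NUM_PORTS)
            offset = 0;
        return this_port;
    }

    /* If the URI's first and last ':' coincide ("tcp:host"), no port was
     * given and one is allocated from the pool; otherwise the trailing
     * ":port" is parsed. Returns -1 on a malformed port. */
    static int pick_port(const char *uri)
    {
        const char *last = strrchr(uri, ':');
        char *end;
        long port;

        if (last == strchr(uri, ':'))
            return next_port();
        port = strtol(last + 1, &end, 10);
        if (end == last + 1 || *end != '\0' || port <= 0 || port > 65535)
            return -1;
        return (int)port;
    }

    int main(void)
    {
        printf("%d\n", pick_port("tcp:desthost"));        /* from the pool */
        printf("%d\n", pick_port("tcp:desthost:50000"));  /* explicit */
        printf("%d\n", pick_port("tcp:desthost:bogus"));  /* -1, rejected */
        return 0;
    }

The strchr/strrchr comparison is the same trick the real parser uses: if the first and last colon coincide, the URI carried only the scheme and no port.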
+
+
+/* Perform migration using QEMU's native TCP migrate support,
+ * not encrypted obviously
+ */
+static int doNativeMigrate(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ const char *uri,
+ unsigned int flags,
+ const char *dname ATTRIBUTE_UNUSED,
+ unsigned long resource)
+{
+ int ret = -1;
+ xmlURIPtr uribits = NULL;
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
+
+ /* Issue the migrate command. */
+ if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://"))
{
+ /* HACK: source host generates bogus URIs, so fix them up */
+ char *tmpuri;
+        if (virAsprintf(&tmpuri, "tcp://%s", uri + strlen("tcp:")) < 0) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ uribits = xmlParseURI(tmpuri);
+ VIR_FREE(tmpuri);
+ } else {
+ uribits = xmlParseURI(uri);
+ }
+ if (!uribits) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("cannot parse URI %s"), uri);
+ goto cleanup;
+ }
+
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (resource > 0 &&
+ qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ goto cleanup;
+ }
+
+ if (flags & VIR_MIGRATE_NON_SHARED_DISK)
+ background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
+
+ if (flags & VIR_MIGRATE_NON_SHARED_INC)
+ background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
+
+ if (qemuMonitorMigrateToHost(priv->mon, background_flags, uribits->server,
+ uribits->port) < 0) {
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ goto cleanup;
+ }
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+ if (qemuMigrationWaitForCompletion(driver, vm) < 0)
+ goto cleanup;
+
+ ret = 0;
+
+cleanup:
+ xmlFreeURI(uribits);
+ return ret;
+}
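
doNativeMigrate has to repair the ill-formed "tcp:host:port" URIs older sources generate before libxml2 will parse them. That rewrite in isolation, as a sketch using asprintf (hence _GNU_SOURCE) in place of virAsprintf:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return a malloc'd, well-formed URI: legacy "tcp:host:port" becomes
     * "tcp://host:port"; anything already well-formed is duplicated
     * unchanged. NULL on allocation failure. */
    static char *fixup_migration_uri(const char *uri)
    {
        char *fixed;

        if (strncmp(uri, "tcp:", 4) == 0 && strncmp(uri, "tcp://", 6) != 0) {
            if (asprintf(&fixed, "tcp://%s", uri + strlen("tcp:")) < 0)
                return NULL;
            return fixed;
        }
        return strdup(uri);
    }

    int main(void)
    {
        char *u = fixup_migration_uri("tcp:desthost:49152");
        if (u) {
            puts(u);   /* prints tcp://desthost:49152 */
            free(u);
        }
        return 0;
    }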
+
+
+#define TUNNEL_SEND_BUF_SIZE 65536
+
+static int doTunnelSendAll(virStreamPtr st,
+ int sock)
+{
+ char *buffer;
+ int nbytes = TUNNEL_SEND_BUF_SIZE;
+
+ if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0) {
+ virReportOOMError();
+ virStreamAbort(st);
+ return -1;
+ }
+
+ /* XXX should honour the 'resource' parameter here */
+ for (;;) {
+ nbytes = saferead(sock, buffer, nbytes);
+ if (nbytes < 0) {
+            virReportSystemError(errno, "%s",
+                                 _("tunnelled migration failed to read from qemu"));
+ virStreamAbort(st);
+ VIR_FREE(buffer);
+ return -1;
+ }
+ else if (nbytes == 0)
+ /* EOF; get out of here */
+ break;
+
+ if (virStreamSend(st, buffer, nbytes) < 0) {
+            qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
+                            _("Failed to write migration data to remote libvirtd"));
+ VIR_FREE(buffer);
+ return -1;
+ }
+ }
+
+ VIR_FREE(buffer);
+
+ if (virStreamFinish(st) < 0)
+ /* virStreamFinish set the error for us */
+ return -1;
+
+ return 0;
+}
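
doTunnelSendAll is a plain pump loop: drain qemu's socket until EOF, forwarding each chunk to the stream, then let virStreamFinish surface any error queued by the remote side. The same shape with plain file descriptors standing in for saferead/virStreamSend, so it compiles on its own:

    #include <errno.h>
    #include <unistd.h>

    /* Forward everything from src to dst until EOF on src. EOF from the
     * source, not the sink, is what terminates the transfer, mirroring
     * the loop above. Returns 0 on success, -1 on error. */
    static int pump_fd(int src, int dst)
    {
        char buf[65536];   /* mirrors TUNNEL_SEND_BUF_SIZE */

        for (;;) {
            ssize_t n = read(src, buf, sizeof(buf));
            if (n < 0) {
                if (errno == EINTR)
                    continue;      /* the job saferead() does internally */
                return -1;
            }
            if (n == 0)
                return 0;          /* EOF: migration data complete */

            ssize_t off = 0;
            while (off < n) {      /* cope with short writes */
                ssize_t w = write(dst, buf + off, n - off);
                if (w < 0) {
                    if (errno == EINTR)
                        continue;
                    return -1;
                }
                off += w;
            }
        }
    }

    int main(void)
    {
        return pump_fd(STDIN_FILENO, STDOUT_FILENO) == 0 ? 0 : 1;
    }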
+
+static int doTunnelMigrate(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *dom_xml,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ int client_sock = -1;
+ int qemu_sock = -1;
+ struct sockaddr_un sa_qemu, sa_client;
+ socklen_t addrlen;
+ virDomainPtr ddomain = NULL;
+ int retval = -1;
+ virStreamPtr st = NULL;
+ char *unixfile = NULL;
+ int internalret;
+ unsigned long long qemuCmdFlags;
+ int status;
+ unsigned long long transferred, remaining, total;
+ unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
+
+ /*
+ * The order of operations is important here to avoid touching
+ * the source VM until we are very sure we can successfully
+ * start the migration operation.
+ *
+ * 1. setup local support infrastructure (eg sockets)
+ * 2. setup destination fully
+ * 3. start migration on source
+ */
+
+
+ /* Stage 1. setup local support infrastructure */
+
+ if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
+ driver->libDir, vm->def->name) < 0) {
+ virReportOOMError();
+ goto cleanup;
+ }
+
+ qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (qemu_sock < 0) {
+ virReportSystemError(errno, "%s",
+ _("cannot open tunnelled migration socket"));
+ goto cleanup;
+ }
+ memset(&sa_qemu, 0, sizeof(sa_qemu));
+ sa_qemu.sun_family = AF_UNIX;
+ if (virStrcpy(sa_qemu.sun_path, unixfile,
+ sizeof(sa_qemu.sun_path)) == NULL) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unix socket '%s' too big for destination"),
+ unixfile);
+ goto cleanup;
+ }
+ unlink(unixfile);
+ if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
+        virReportSystemError(errno,
+                             _("Cannot bind to unix socket '%s' for tunnelled migration"),
+                             unixfile);
+ goto cleanup;
+ }
+ if (listen(qemu_sock, 1) < 0) {
+        virReportSystemError(errno,
+                             _("Cannot listen on unix socket '%s' for tunnelled migration"),
+                             unixfile);
+ goto cleanup;
+ }
+
+ if (chown(unixfile, driver->user, driver->group) < 0) {
+        virReportSystemError(errno,
+                             _("Cannot change unix socket '%s' owner"),
+                             unixfile);
+ goto cleanup;
+ }
+
+ /* check that this qemu version supports the unix migration */
+    if (qemuCapsExtractVersionInfo(vm->def->emulator, NULL, &qemuCmdFlags) < 0) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Cannot extract Qemu version from '%s'"),
+ vm->def->emulator);
+ goto cleanup;
+ }
+
+ if (!(qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX) &&
+ !(qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC)) {
+        qemuReportError(VIR_ERR_OPERATION_FAILED,
+                        "%s", _("Source qemu is too old to support tunnelled migration"));
+ goto cleanup;
+ }
+
+
+ /* Stage 2. setup destination fully
+ *
+ * Once stage 2 has completed successfully, we *must* call finish
+ * to cleanup the target whether we succeed or fail
+ */
+ st = virStreamNew(dconn, 0);
+ if (st == NULL)
+ /* virStreamNew only fails on OOM, and it reports the error itself */
+ goto cleanup;
+
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st,
+ flags, dname,
+ resource, dom_xml);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+
+ if (internalret < 0)
+ /* domainMigratePrepareTunnel sets the error for us */
+ goto cleanup;
+
+ /* the domain may have shutdown or crashed while we had the locks dropped
+ * in qemuDomainObjEnterRemoteWithDriver, so check again
+ */
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ /* 3. start migration on source */
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (flags & VIR_MIGRATE_NON_SHARED_DISK)
+ background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
+ if (flags & VIR_MIGRATE_NON_SHARED_INC)
+ background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
+    if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX) {
+        internalret = qemuMonitorMigrateToUnix(priv->mon, background_flags,
+                                               unixfile);
+    } else if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC) {
+        const char *args[] = { "nc", "-U", unixfile, NULL };
+        internalret = qemuMonitorMigrateToCommand(priv->mon,
+                                                  QEMU_MONITOR_MIGRATE_BACKGROUND,
+                                                  args);
+ } else {
+ internalret = -1;
+ }
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ if (internalret < 0) {
+        qemuReportError(VIR_ERR_OPERATION_FAILED,
+                        "%s", _("tunnelled migration monitor command failed"));
+ goto finish;
+ }
+
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ /* From this point onwards we *must* call cancel to abort the
+ * migration on source if anything goes wrong */
+
+ /* it is also possible that the migrate didn't fail initially, but
+ * rather failed later on. Check the output of "info migrate"
+ */
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ if (qemuMonitorGetMigrationStatus(priv->mon,
+ &status,
+ &transferred,
+ &remaining,
+ &total) < 0) {
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ goto cancel;
+ }
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+ if (status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s",_("migrate failed"));
+ goto cancel;
+ }
+
+ addrlen = sizeof(sa_client);
+    while ((client_sock = accept(qemu_sock, (struct sockaddr *)&sa_client,
+                                 &addrlen)) < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+        virReportSystemError(errno, "%s",
+                             _("tunnelled migration failed to accept from qemu"));
+ goto cancel;
+ }
+
+ retval = doTunnelSendAll(st, client_sock);
+
+cancel:
+ if (retval != 0 && virDomainObjIsActive(vm)) {
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ qemuMonitorMigrateCancel(priv->mon);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+ }
+
+finish:
+ dname = dname ? dname : vm->def->name;
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ ddomain = dconn->driver->domainMigrateFinish2
+ (dconn, dname, NULL, 0, uri, flags, retval);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+
+cleanup:
+ VIR_FORCE_CLOSE(client_sock);
+ VIR_FORCE_CLOSE(qemu_sock);
+
+ if (ddomain)
+ virUnrefDomain(ddomain);
+
+ if (unixfile) {
+ unlink(unixfile);
+ VIR_FREE(unixfile);
+ }
+
+ if (st)
+ /* don't call virStreamFree(), because that resets any pending errors */
+ virUnrefStream(st);
+ return retval;
+}
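
Stage 1 above is standard unix-listener boilerplate around a path libvirt controls. A trimmed standalone version, with error reporting reduced to return codes and an arbitrary demo path (the real path is built from driver->libDir and the domain name):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    /* Create the listening unix socket that qemu's "migrate unix:<path>"
     * will connect to. Returns the listening fd, or -1 on failure. */
    static int open_tunnel_listener(const char *path)
    {
        struct sockaddr_un sa;
        int sock = socket(AF_UNIX, SOCK_STREAM, 0);

        if (sock < 0)
            return -1;
        memset(&sa, 0, sizeof(sa));
        sa.sun_family = AF_UNIX;
        if (strlen(path) >= sizeof(sa.sun_path)) {   /* path too long */
            close(sock);
            return -1;
        }
        strcpy(sa.sun_path, path);
        unlink(path);   /* clear any stale socket from a previous run */
        if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            listen(sock, 1) < 0) {
            close(sock);
            return -1;
        }
        return sock;
    }

    int main(void)
    {
        int s = open_tunnel_listener("/tmp/qemu.tunnelmigrate.src.demo");
        if (s < 0) {
            perror("listener");
            return 1;
        }
        close(s);
        return 0;
    }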
+
+
+/* This is essentially a simplified re-impl of
+ * virDomainMigrateVersion2 from libvirt.c, but running in source
+ * libvirtd context, instead of client app context */
+static int doNonTunnelMigrate(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *dom_xml,
+ const char *uri ATTRIBUTE_UNUSED,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ virDomainPtr ddomain = NULL;
+ int retval = -1;
+ char *uri_out = NULL;
+ int rc;
+
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ /* NB we don't pass 'uri' into this, since that's the libvirtd
+ * URI in this context - so we let dest pick it */
+ rc = dconn->driver->domainMigratePrepare2(dconn,
+ NULL, /* cookie */
+ 0, /* cookielen */
+ NULL, /* uri */
+ &uri_out,
+ flags, dname,
+ resource, dom_xml);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+ if (rc < 0)
+ /* domainMigratePrepare2 sets the error for us */
+ goto cleanup;
+
+ /* the domain may have shutdown or crashed while we had the locks dropped
+ * in qemuDomainObjEnterRemoteWithDriver, so check again
+ */
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ if (uri_out == NULL) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("domainMigratePrepare2 did not set uri"));
+ goto cleanup;
+ }
+
+ if (doNativeMigrate(driver, vm, uri_out, flags, dname, resource) < 0)
+ goto finish;
+
+ retval = 0;
+
+finish:
+ dname = dname ? dname : vm->def->name;
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ ddomain = dconn->driver->domainMigrateFinish2
+ (dconn, dname, NULL, 0, uri_out, flags, retval);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+
+ if (ddomain)
+ virUnrefDomain(ddomain);
+
+cleanup:
+ return retval;
+}
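
The contract this function implements is worth spelling out: Prepare2 runs first and lets the destination pick the real migration URI, Perform then streams from the source, and Finish2 must run on the destination whether Perform succeeded or not, so a half-started destination process is always torn down. A sketch of that ordering with hypothetical stand-in types; none of these names exist in libvirt:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-ins for the v2 entry points. */
    typedef struct {
        int  (*prepare2)(char **uri_out);  /* dest: start qemu -incoming */
        int  (*perform)(const char *uri);  /* source: stream state */
        void (*finish2)(int retcode);      /* dest: resume or clean up */
    } v2_ops;

    static int migrate_v2(const v2_ops *ops)
    {
        char *uri_out = NULL;
        int rc = ops->prepare2(&uri_out);

        if (rc < 0 || uri_out == NULL)
            return -1;        /* nothing started on dest, nothing to undo */

        rc = ops->perform(uri_out);
        ops->finish2(rc);     /* always runs, success or failure */
        free(uri_out);
        return rc;
    }

    /* Dummy ops so the sketch links and runs. */
    static int  p2(char **u)      { *u = strdup("tcp://dest:49152"); return *u ? 0 : -1; }
    static int  pf(const char *u) { printf("migrating to %s\n", u); return 0; }
    static void f2(int rc)        { printf("finish2(retcode=%d)\n", rc); }

    int main(void)
    {
        v2_ops ops = { p2, pf, f2 };
        return migrate_v2(&ops) == 0 ? 0 : 1;
    }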
+
+
+static int doPeer2PeerMigrate(struct qemud_driver *driver,
+ virDomainObjPtr vm,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ int ret = -1;
+ virConnectPtr dconn = NULL;
+ char *dom_xml;
+ bool p2p;
+
+ /* the order of operations is important here; we make sure the
+ * destination side is completely setup before we touch the source
+ */
+
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ dconn = virConnectOpen(uri);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+ if (dconn == NULL) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ _("Failed to connect to remote libvirt URI %s"), uri);
+ return -1;
+ }
+
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+ VIR_DRV_FEATURE_MIGRATION_P2P);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+ if (!p2p) {
+        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
+                        _("Destination libvirt does not support peer-to-peer migration protocol"));
+ goto cleanup;
+ }
+
+ /* domain may have been stopped while we were talking to remote daemon */
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ dom_xml = qemuDomainFormatXML(driver, vm,
+ VIR_DOMAIN_XML_SECURE |
+ VIR_DOMAIN_XML_UPDATE_CPU);
+ if (!dom_xml) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ "%s", _("failed to get domain xml"));
+ goto cleanup;
+ }
+
+ if (flags & VIR_MIGRATE_TUNNELLED)
+        ret = doTunnelMigrate(driver, dconn, vm, dom_xml, uri, flags, dname,
+                              resource);
+ else
+        ret = doNonTunnelMigrate(driver, dconn, vm, dom_xml, uri, flags, dname,
+                                 resource);
+
+cleanup:
+ VIR_FREE(dom_xml);
+ /* don't call virConnectClose(), because that resets any pending errors */
+ qemuDomainObjEnterRemoteWithDriver(driver, vm);
+ virUnrefConnect(dconn);
+ qemuDomainObjExitRemoteWithDriver(driver, vm);
+
+ return ret;
+}
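
A pattern that recurs throughout this file: each EnterRemote/ExitRemote pair drops the domain lock around a slow RPC, so the guest may die in the meantime and liveness must be re-checked before touching it again. Boiled down to a runnable toy, with every name here a hypothetical stand-in:

    #include <stdbool.h>
    #include <stdio.h>

    static bool vm_active = true;

    static void enter_remote(void) { puts("domain lock dropped"); }
    static void exit_remote(void)  { puts("domain lock reacquired"); }

    /* Simulate the guest dying while the lock was dropped. */
    static int slow_rpc(void) { vm_active = false; return 0; }

    static int call_remote_safely(void)
    {
        int rc;

        enter_remote();
        rc = slow_rpc();
        exit_remote();
        if (rc < 0)
            return -1;

        /* the world may have changed while the lock was dropped */
        if (!vm_active) {
            fprintf(stderr, "guest unexpectedly quit\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return call_remote_safely() == 0 ? 0 : 1;
    }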
+
+
+int qemuMigrationPerform(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
+{
+ virDomainEventPtr event = NULL;
+ int ret = -1;
+ int resume = 0;
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ goto cleanup;
+ priv->jobActive = QEMU_JOB_MIGRATION_OUT;
+
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID,
+ "%s", _("domain is not running"));
+ goto endjob;
+ }
+
+ memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
+ priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
+
+ resume = vm->state == VIR_DOMAIN_RUNNING;
+ if (!(flags & VIR_MIGRATE_LIVE) && vm->state == VIR_DOMAIN_RUNNING) {
+ if (qemuMigrationSetOffline(driver, vm) < 0)
+ goto endjob;
+ }
+
+ if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
+ if (doPeer2PeerMigrate(driver, vm, uri, flags, dname, resource) < 0)
+ /* doPeer2PeerMigrate already set the error, so just get out */
+ goto endjob;
+ } else {
+ if (doNativeMigrate(driver, vm, uri, flags, dname, resource) < 0)
+ goto endjob;
+ }
+
+ /* Clean up the source domain. */
+ qemuProcessStop(driver, vm, 1);
+ qemuDomainStopAudit(vm, "migrated");
+ resume = 0;
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STOPPED,
+ VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
+ if (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE)) {
+ virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
+ if (qemuDomainObjEndJob(vm) > 0)
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+ ret = 0;
+
+endjob:
+ if (resume && vm->state == VIR_DOMAIN_PAUSED) {
+ /* we got here through some sort of failure; start the domain again */
+ if (qemuProcessStartCPUs(driver, vm, conn) < 0) {
+ /* Hm, we already know we are in error here. We don't want to
+ * overwrite the previous error, though, so we just throw something
+ * to the logs and hope for the best
+ */
+ VIR_ERROR(_("Failed to resume guest %s after failure"),
+ vm->def->name);
+ }
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_RESUMED,
+ VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+ }
+ if (vm &&
+ qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+
+cleanup:
+ if (vm)
+ virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ return ret;
+}
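
The 'resume' latch above is small but load-bearing: it records whether the guest was running before we paused it for a non-live migration, and is cleared the moment the source is stopped, so the error path never restarts CPUs on a source whose guest now runs on the destination. Its skeleton, with hypothetical stand-ins for the domain operations:

    #include <stdbool.h>
    #include <stdio.h>

    static bool domain_running = true;

    static void pause_cpus(void)  { domain_running = false; puts("paused"); }
    static void resume_cpus(void) { domain_running = true;  puts("resumed"); }
    static int  do_migrate(void)  { return -1; /* simulate a failure */ }

    static int perform(bool live)
    {
        bool resume = domain_running;   /* remember pre-migration state */
        int ret = -1;

        if (!live && domain_running)
            pause_cpus();

        if (do_migrate() < 0)
            goto endjob;

        resume = false;   /* source stopped for good; never restart it */
        ret = 0;

    endjob:
        if (resume && !domain_running)
            resume_cpus();   /* failure path: give the guest back */
        return ret;
    }

    int main(void)
    {
        return perform(false) == 0 ? 0 : 1;
    }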
+
+
+#if WITH_MACVTAP
+static void
+qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def) {
+ int i;
+ int last_good_net = -1;
+ virDomainNetDefPtr net;
+
+ for (i = 0; i < def->nnets; i++) {
+ net = def->nets[i];
+ if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
+ if (vpAssociatePortProfileId(net->ifname,
+ net->mac,
+ net->data.direct.linkdev,
+ &net->data.direct.virtPortProfile,
+ def->uuid,
+ VIR_VM_OP_MIGRATE_IN_FINISH) != 0)
+ goto err_exit;
+ }
+ last_good_net = i;
+ }
+
+ return;
+
+err_exit:
+    /* note <=: last_good_net is the index of the last successfully
+     * associated profile, which must be rolled back too */
+    for (i = 0; i <= last_good_net; i++) {
+ net = def->nets[i];
+ if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
+ vpDisassociatePortProfileId(net->ifname,
+ net->mac,
+ net->data.direct.linkdev,
+ &net->data.direct.virtPortProfile,
+ VIR_VM_OP_MIGRATE_IN_FINISH);
+ }
+ }
+}
+#else /* !WITH_MACVTAP */
+static void
+qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def ATTRIBUTE_UNUSED) { }
+#endif /* WITH_MACVTAP */
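
The associate/rollback shape above, with its inclusive bound on last_good_net, generalizes to any "apply N things, undo on first failure" sequence. A minimal standalone rendering of the pattern:

    #include <stdio.h>

    static int  apply(int i) { return i == 3 ? -1 : 0; }  /* fail on item 3 */
    static void undo(int i)  { printf("undo %d\n", i); }

    /* Apply items 0..n-1; on the first failure, undo everything up to and
     * including the last item that succeeded. */
    static int apply_all(int n)
    {
        int last_good = -1;
        int i;

        for (i = 0; i < n; i++) {
            if (apply(i) < 0)
                goto rollback;
            last_good = i;
        }
        return 0;

    rollback:
        for (i = 0; i <= last_good; i++)   /* inclusive bound matters */
            undo(i);
        return -1;
    }

    int main(void)
    {
        return apply_all(5) == 0 ? 0 : 1;
    }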
+
+
+virDomainPtr
+qemuMigrationFinish(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ unsigned long flags,
+ int retcode)
+{
+ virDomainPtr dom = NULL;
+ virDomainEventPtr event = NULL;
+ int newVM = 1;
+ qemuDomainObjPrivatePtr priv = NULL;
+
+ priv = vm->privateData;
+ if (priv->jobActive != QEMU_JOB_MIGRATION_IN) {
+        qemuReportError(VIR_ERR_NO_DOMAIN,
+                        _("domain '%s' is not processing incoming migration"),
+                        vm->def->name);
+ goto cleanup;
+ }
+ priv->jobActive = QEMU_JOB_NONE;
+ memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
+
+ if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
+ goto cleanup;
+
+ /* Did the migration go as planned? If yes, return the domain
+ * object, but if no, clean up the empty qemu process.
+ */
+ if (retcode == 0) {
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("guest unexpectedly quit"));
+ goto cleanup;
+ }
+
+ qemuMigrationVPAssociatePortProfiles(vm->def);
+
+ if (flags & VIR_MIGRATE_PERSIST_DEST) {
+ if (vm->persistent)
+ newVM = 0;
+ vm->persistent = 1;
+
+ if (virDomainSaveConfig(driver->configDir, vm->def) < 0) {
+ /* Hmpf. Migration was successful, but making it persistent
+ * was not. If we report successful, then when this domain
+ * shuts down, management tools are in for a surprise. On the
+ * other hand, if we report failure, then the management tools
+ * might try to restart the domain on the source side, even
+ * though the domain is actually running on the destination.
+ * Return a NULL dom pointer, and hope that this is a rare
+ * situation and management tools are smart.
+ */
+ vm = NULL;
+ goto endjob;
+ }
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_DEFINED,
+ newVM ?
+ VIR_DOMAIN_EVENT_DEFINED_ADDED :
+ VIR_DOMAIN_EVENT_DEFINED_UPDATED);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ event = NULL;
+
+ }
+ dom = virGetDomain (dconn, vm->def->name, vm->def->uuid);
+
+ if (!(flags & VIR_MIGRATE_PAUSED)) {
+ /* run 'cont' on the destination, which allows migration on qemu
+ * >= 0.10.6 to work properly. This isn't strictly necessary on
+             * older qemu versions, but it also doesn't hurt anything there
+ */
+ if (qemuProcessStartCPUs(driver, vm, dconn) < 0) {
+ if (virGetLastError() == NULL)
+                qemuReportError(VIR_ERR_INTERNAL_ERROR,
+                                "%s", _("resume operation failed"));
+ goto endjob;
+ }
+ }
+
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_RESUMED,
+ VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+ if (vm->state == VIR_DOMAIN_PAUSED) {
+ qemuDomainEventQueue(driver, event);
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_SUSPENDED,
+ VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+ }
+ if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+ VIR_WARN("Failed to save status on vm %s", vm->def->name);
+ goto endjob;
+ }
+ } else {
+ qemuProcessStop(driver, vm, 1);
+ qemuDomainStopAudit(vm, "failed");
+ event = virDomainEventNewFromObj(vm,
+ VIR_DOMAIN_EVENT_STOPPED,
+ VIR_DOMAIN_EVENT_STOPPED_FAILED);
+ if (!vm->persistent) {
+ if (qemuDomainObjEndJob(vm) > 0)
+ virDomainRemoveInactive(&driver->domains, vm);
+ vm = NULL;
+ }
+ }
+
+endjob:
+ if (vm &&
+ qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+
+cleanup:
+ if (vm)
+ virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
+ return dom;
+}
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
new file mode 100644
index 0000000..3cac617
--- /dev/null
+++ b/src/qemu/qemu_migration.h
@@ -0,0 +1,63 @@
+/*
+ * qemu_migration.h: QEMU migration handling
+ *
+ * Copyright (C) 2006-2011 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __QEMU_MIGRATION_H__
+# define __QEMU_MIGRATION_H__
+
+# include "qemu_conf.h"
+
+
+bool qemuMigrationIsAllowed(virDomainDefPtr def)
+ ATTRIBUTE_NONNULL(1);
+int qemuMigrationSetOffline(struct qemud_driver *driver,
+ virDomainObjPtr vm);
+
+int qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm);
+
+int qemuMigrationPrepareTunnel(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ virStreamPtr st,
+ const char *dname,
+ const char *dom_xml);
+
+int qemuMigrationPrepareDirect(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ const char *uri_in,
+ char **uri_out,
+ const char *dname,
+ const char *dom_xml);
+
+int qemuMigrationPerform(struct qemud_driver *driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *uri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource);
+
+virDomainPtr qemuMigrationFinish(struct qemud_driver *driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ unsigned long flags,
+ int retcode);
+
+
+#endif /* __QEMU_MIGRATION_H__ */
--
1.7.4