[libvirt] [PATCH V3] implement offline migration

Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- daemon/remote.c | 46 +++++++++++++++++++++++++++ docs/hvsupport.pl | 2 + include/libvirt/libvirt.h.in | 6 +++ python/generator.py | 1 + src/driver.h | 5 +++ src/libvirt.c | 22 +++++++++++++ src/libvirt_public.syms | 1 + src/remote/remote_driver.c | 70 ++++++++++++++++++++++++++++++++++++++++++ src/remote/remote_protocol.x | 10 +++++- tools/virsh-domain.c | 69 +++++++++++++++++++++++++++++++++++++++++ 10 files changed, 231 insertions(+), 1 deletions(-) diff --git a/daemon/remote.c b/daemon/remote.c index 24928f4..c47a580 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -21,6 +21,9 @@ */ #include <config.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> #include "virterror_internal.h" @@ -48,6 +51,7 @@ #include "virdbus.h" #include "remote_protocol.h" #include "qemu_protocol.h" +#include "fdstream.h" #define VIR_FROM_THIS VIR_FROM_RPC @@ -1768,6 +1772,48 @@ no_memory: goto cleanup; } +static int remoteDispatchDomainMigrateOffline( + virNetServerPtr server ATTRIBUTE_UNUSED, + virNetServerClientPtr client, + virNetMessagePtr msg ATTRIBUTE_UNUSED, + virNetMessageErrorPtr rerr, + remote_domain_migrate_offline_args *args, + remote_domain_migrate_offline_ret *ret ATTRIBUTE_UNUSED) +{ + int rv = -1; + virStreamPtr st = NULL; + daemonClientStreamPtr stream = NULL; + daemonClientPrivatePtr priv = + virNetServerClientGetPrivateData(client); + + if (!priv->conn) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + st = virStreamNew(priv->conn, VIR_STREAM_NONBLOCK); + + if (!(stream = daemonCreateClientStream(client, st, remoteProgram, &msg->header))) + goto cleanup; + + if (virFDStreamCreateFile(st, + args->name, + 0, 0, + O_WRONLY, 0) < 0) + goto cleanup; + + + if (daemonAddClientStream(client, stream, false) < 0) + goto cleanup; + + rv = 0; + +cleanup: + if (rv < 0) + virNetMessageSaveError(rerr); + return rv; +} + static int 
remoteDispatchDomainMigratePrepare(virNetServerPtr server ATTRIBUTE_UNUSED, virNetServerClientPtr client ATTRIBUTE_UNUSED, diff --git a/docs/hvsupport.pl b/docs/hvsupport.pl index 4871739..47fc505 100755 --- a/docs/hvsupport.pl +++ b/docs/hvsupport.pl @@ -128,6 +128,8 @@ $apis{virDomainMigratePrepareTunnel3} = "0.9.2"; $apis{virDomainMigratePerform3} = "0.9.2"; $apis{virDomainMigrateFinish3} = "0.9.2"; $apis{virDomainMigrateConfirm3} = "0.9.2"; +$apis{virDomainMigrateOffline} = "0.10.1"; + diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index cfe5047..7c9cf3c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migration */ } virDomainMigrateFlags; /* Domain migration. */ @@ -1030,6 +1031,11 @@ int virDomainMigrateGetMaxSpeed(virDomainPtr domain, unsigned long *bandwidth, unsigned int flags); +int +virDomainMigrateOffline(virConnectPtr dconn, + char *file); + + /** * VIR_NODEINFO_MAXCPUS: * @nodeinfo: virNodeInfo instance diff --git a/python/generator.py b/python/generator.py index 7beb361..a1b1203 100755 --- a/python/generator.py +++ b/python/generator.py @@ -427,6 +427,7 @@ skip_impl = ( 'virDomainGetDiskErrors', 'virConnectUnregisterCloseCallback', 'virConnectRegisterCloseCallback', + 'virDomainMigrateOffline', ) qemu_skip_impl = ( diff --git a/src/driver.h b/src/driver.h index e88ab28..9041005 100644 --- a/src/driver.h +++ b/src/driver.h @@ -881,6 +881,10 @@ typedef char * int type, const char *uri, unsigned int flags); +typedef int + (*virDrvDomainMigrateOffline)(virConnectPtr dconn, + const char *file); + /** * _virDriver: @@ -1068,6 +1072,7 @@ struct _virDriver { virDrvDomainGetDiskErrors domainGetDiskErrors; virDrvDomainSetMetadata domainSetMetadata; 
virDrvDomainGetMetadata domainGetMetadata; + virDrvDomainMigrateOffline domainMigrateOffline; }; typedef int diff --git a/src/libvirt.c b/src/libvirt.c index b034ed6..2878384 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -5001,6 +5001,28 @@ virDomainMigratePeer2Peer (virDomainPtr domain, } } +/** + * virDomainMigrateOffline: + * @dconn: target connection handler + * @file: the file to push to target + * + * to handle offline migration + * Returns -1 if error, else 0 + */ +int +virDomainMigrateOffline(virConnectPtr dconn, + char *file) +{ + VIR_DEBUG("dconn=%p, file=%s", dconn, NULLSTR(file)); + + if (!VIR_IS_CONNECT (dconn)) { + virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + return dconn->driver->domainMigrateOffline(dconn, file); +} /* * In normal migration, the libvirt client co-ordinates communication diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 92ae95a..e6a7de7 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -550,6 +550,7 @@ LIBVIRT_0.10.0 { virConnectRegisterCloseCallback; virConnectUnregisterCloseCallback; virDomainGetSecurityLabelList; + virDomainMigrateOffline; virDomainPinEmulator; virDomainGetEmulatorPinInfo; } LIBVIRT_0.9.13; diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index cf1f079..0952783 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -22,8 +22,12 @@ */ #include <config.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> #include <unistd.h> +#include <stdio.h> #include <assert.h> #include "virnetclient.h" @@ -5247,6 +5251,71 @@ done: return rv; } +static int +doRemoteReadFile(virStreamPtr st ATTRIBUTE_UNUSED, + char *buf, size_t nbytes, void *opaque) +{ + int *fd = opaque; + + return read(*fd, buf, nbytes); +} + +static int +remoteDomainMigrateOffline(virConnectPtr dconn, + const char *name) +{ + int rv = -1, fd = -1; + virStreamPtr st = virStreamNew(dconn, 0); + 
remote_domain_migrate_offline_args args; + remote_domain_migrate_offline_ret ret; + struct private_data *priv = dconn->privateData; + virNetClientStreamPtr netst = NULL; + + remoteDriverLock(priv); + + args.name = (char *)name; + memset(&ret, 0, sizeof(ret)); + + if (!(netst = virNetClientStreamNew(priv->remoteProgram, REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE, priv->counter))) + goto done; + if (virNetClientAddStream(priv->client, netst) < 0) { + virObjectUnref(netst); + goto done; + } + st->driver = &remoteStreamDrv; + st->privateData = netst; + + if ((fd = open(name, O_RDONLY)) < 0) + goto done; + if (fd == -1) + goto done; + + if (call (dconn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE, + (xdrproc_t) xdr_remote_domain_migrate_offline_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_offline_ret, (char *) &ret) == -1) { + virNetClientRemoveStream(priv->client, netst); + virObjectUnref(netst); + st->driver = NULL; + st->privateData = NULL; + goto done; + } + + remoteDriverUnlock(priv); + + if (virStreamSendAll(st, doRemoteReadFile, &fd) < 0) + goto done; + if (virStreamFinish(st) < 0) + goto done; + if (VIR_CLOSE(fd) < 0) + goto done; + + rv = 0; + +done: + return rv; +} + + static void remoteDomainEventQueue(struct private_data *priv, virDomainEventPtr event) { @@ -5491,6 +5560,7 @@ static virDriver remote_driver = { .domainEventDeregister = remoteDomainEventDeregister, /* 0.5.0 */ .domainMigratePrepare2 = remoteDomainMigratePrepare2, /* 0.5.0 */ .domainMigrateFinish2 = remoteDomainMigrateFinish2, /* 0.5.0 */ + .domainMigrateOffline = remoteDomainMigrateOffline, /* 0.10.1 */ .nodeDeviceDettach = remoteNodeDeviceDettach, /* 0.6.1 */ .nodeDeviceReAttach = remoteNodeDeviceReAttach, /* 0.6.1 */ .nodeDeviceReset = remoteNodeDeviceReset, /* 0.6.1 */ diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 085d5d9..c845737 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2558,6 +2558,13 @@ struct 
remote_connect_list_all_domains_ret { unsigned int ret; }; +struct remote_domain_migrate_offline_args { + remote_nonnull_string name; +}; + +struct remote_domain_migrate_offline_ret { + int retval; +}; /*----- Protocol. -----*/ @@ -2888,7 +2895,8 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_GET_HOSTNAME = 277, /* autogen autogen */ REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL_LIST = 278, /* skipgen skipgen priority:high */ REMOTE_PROC_DOMAIN_PIN_EMULATOR = 279, /* skipgen skipgen */ - REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO = 280 /* skipgen skipgen */ + REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO = 280, /* skipgen skipgen */ + REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE = 281 /* skipgen skipgen priority:low*/ /* * Notice how the entries are grouped in sets of 10 ? diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index dbcaa25..70f7694 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6698,9 +6698,66 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("migration when there's no domain active")}, {NULL, 0, 0, NULL} }; +static int +push_file(char dst[] ATTRIBUTE_UNUSED, char *file, virConnectPtr dconn) +{ + int ret = -1; + + ret = virDomainMigrateOffline(dconn, file); + + return ret; +} + +static void +vshMigrateOffline(vshControl *ctl, char *file, char dst[]) +{ + xmlDocPtr xml = NULL; + xmlXPathContextPtr ctxt = NULL; + xmlNodePtr *disks = NULL; + virConnectPtr dconn = NULL; + int i = 0, ret = 0; + char *src[] = {NULL}; + + if (!vshConnectionUsability(ctl, ctl->conn)) + return; + + xml = virXMLParseFileCtxt(file, &ctxt); + if (!xml) { + vshError(NULL, "%s", _("Fail to get domain information from")); + goto cleanup; + } + + ret = virXPathNodeSet("./devices/disk", 
ctxt, &disks); + if (ret < 0) { + vshError(NULL, "%s", _("Fail to get disk node")); + goto cleanup; + } + + dconn = virConnectOpen(dst); + if (!dconn) + goto cleanup; + vshPrint(ctl, "pushing %s to %s\n", file, dst); + if (push_file(dst, file, dconn) < 0) + goto cleanup; + for (i = 0 ; i < ret ; i++) { + ctxt->node = disks[i]; + src[i] = virXPathString("string(./source/@file" + "|./source/@dir" + "|./source/@name)", ctxt); + vshPrint(ctl, "pushing %s to %s\n", src[i], dst); + if (push_file(dst, src[i], dconn) < 0) + break; + } + +cleanup: + xmlXPathFreeContext(ctxt); + xmlFreeDoc(xml); +} + static void doMigrate(void *opaque) { @@ -6767,12 +6824,24 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE; + if (vshCommandOptBool(cmd, "offline")) { + flags |= VIR_MIGRATE_OFFLINE; + if (xmlfile == NULL) + vshError(ctl, _("please specify xmlfile for offline migration")); + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile); goto out; } + if (flags & VIR_MIGRATE_OFFLINE) { + vshMigrateOffline(ctl, (char *)xmlfile, (char *)desturi); + goto out; + } + + if ((flags & VIR_MIGRATE_PEER2PEER) || vshCommandOptBool(cmd, "direct")) { /* For peer2peer migration or direct migration we only expect one URI -- 1.7.2.5

On Tue, Aug 28, 2012 at 01:38:32PM +0800, liguang wrote:
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
Please provide a full description of what this patch is supposed to be doing and why you've implemented it this way. I struggle to understand what on earth this patch is doing with the streams APIs, nor why we need a new public API for this. Daniel
--- daemon/remote.c | 46 +++++++++++++++++++++++++++ docs/hvsupport.pl | 2 + include/libvirt/libvirt.h.in | 6 +++ python/generator.py | 1 + src/driver.h | 5 +++ src/libvirt.c | 22 +++++++++++++ src/libvirt_public.syms | 1 + src/remote/remote_driver.c | 70 ++++++++++++++++++++++++++++++++++++++++++ src/remote/remote_protocol.x | 10 +++++- tools/virsh-domain.c | 69 +++++++++++++++++++++++++++++++++++++++++ 10 files changed, 231 insertions(+), 1 deletions(-)
diff --git a/daemon/remote.c b/daemon/remote.c index 24928f4..c47a580 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -21,6 +21,9 @@ */
#include <config.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h>
#include "virterror_internal.h"
@@ -48,6 +51,7 @@ #include "virdbus.h" #include "remote_protocol.h" #include "qemu_protocol.h" +#include "fdstream.h"
#define VIR_FROM_THIS VIR_FROM_RPC @@ -1768,6 +1772,48 @@ no_memory: goto cleanup; }
+static int remoteDispatchDomainMigrateOffline( + virNetServerPtr server ATTRIBUTE_UNUSED, + virNetServerClientPtr client, + virNetMessagePtr msg ATTRIBUTE_UNUSED, + virNetMessageErrorPtr rerr, + remote_domain_migrate_offline_args *args, + remote_domain_migrate_offline_ret *ret ATTRIBUTE_UNUSED) +{ + int rv = -1; + virStreamPtr st = NULL; + daemonClientStreamPtr stream = NULL; + daemonClientPrivatePtr priv = + virNetServerClientGetPrivateData(client); + + if (!priv->conn) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + st = virStreamNew(priv->conn, VIR_STREAM_NONBLOCK); + + if (!(stream = daemonCreateClientStream(client, st, remoteProgram, &msg->header))) + goto cleanup; + + if (virFDStreamCreateFile(st, + args->name, + 0, 0, + O_WRONLY, 0) < 0) + goto cleanup; + + + if (daemonAddClientStream(client, stream, false) < 0) + goto cleanup; + + rv = 0; + +cleanup: + if (rv < 0) + virNetMessageSaveError(rerr); + return rv; +} + static int remoteDispatchDomainMigratePrepare(virNetServerPtr server ATTRIBUTE_UNUSED, virNetServerClientPtr client ATTRIBUTE_UNUSED, diff --git a/docs/hvsupport.pl b/docs/hvsupport.pl index 4871739..47fc505 100755 --- a/docs/hvsupport.pl +++ b/docs/hvsupport.pl @@ -128,6 +128,8 @@ $apis{virDomainMigratePrepareTunnel3} = "0.9.2"; $apis{virDomainMigratePerform3} = "0.9.2"; $apis{virDomainMigrateFinish3} = "0.9.2"; $apis{virDomainMigrateConfirm3} = "0.9.2"; +$apis{virDomainMigrateOffline} = "0.10.1"; +
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index cfe5047..7c9cf3c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migration */ } virDomainMigrateFlags;
/* Domain migration. */ @@ -1030,6 +1031,11 @@ int virDomainMigrateGetMaxSpeed(virDomainPtr domain, unsigned long *bandwidth, unsigned int flags);
+int +virDomainMigrateOffline(virConnectPtr dconn, + char *file); + + /** * VIR_NODEINFO_MAXCPUS: * @nodeinfo: virNodeInfo instance diff --git a/python/generator.py b/python/generator.py index 7beb361..a1b1203 100755 --- a/python/generator.py +++ b/python/generator.py @@ -427,6 +427,7 @@ skip_impl = ( 'virDomainGetDiskErrors', 'virConnectUnregisterCloseCallback', 'virConnectRegisterCloseCallback', + 'virDomainMigrateOffline', )
qemu_skip_impl = ( diff --git a/src/driver.h b/src/driver.h index e88ab28..9041005 100644 --- a/src/driver.h +++ b/src/driver.h @@ -881,6 +881,10 @@ typedef char * int type, const char *uri, unsigned int flags); +typedef int + (*virDrvDomainMigrateOffline)(virConnectPtr dconn, + const char *file); +
/** * _virDriver: @@ -1068,6 +1072,7 @@ struct _virDriver { virDrvDomainGetDiskErrors domainGetDiskErrors; virDrvDomainSetMetadata domainSetMetadata; virDrvDomainGetMetadata domainGetMetadata; + virDrvDomainMigrateOffline domainMigrateOffline; };
typedef int diff --git a/src/libvirt.c b/src/libvirt.c index b034ed6..2878384 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -5001,6 +5001,28 @@ virDomainMigratePeer2Peer (virDomainPtr domain, } }
+/** + * virDomainMigrateOffline: + * @dconn: target connection handler + * @file: the file to push to target + * + * to handle offline migration + * Returns -1 if error, else 0 + */ +int +virDomainMigrateOffline(virConnectPtr dconn, + char *file) +{ + VIR_DEBUG("dconn=%p, file=%s", dconn, NULLSTR(file)); + + if (!VIR_IS_CONNECT (dconn)) { + virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + return dconn->driver->domainMigrateOffline(dconn, file); +}
/* * In normal migration, the libvirt client co-ordinates communication diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 92ae95a..e6a7de7 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -550,6 +550,7 @@ LIBVIRT_0.10.0 { virConnectRegisterCloseCallback; virConnectUnregisterCloseCallback; virDomainGetSecurityLabelList; + virDomainMigrateOffline; virDomainPinEmulator; virDomainGetEmulatorPinInfo; } LIBVIRT_0.9.13; diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index cf1f079..0952783 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -22,8 +22,12 @@ */
#include <config.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h>
#include <unistd.h> +#include <stdio.h> #include <assert.h>
#include "virnetclient.h" @@ -5247,6 +5251,71 @@ done: return rv; }
+static int +doRemoteReadFile(virStreamPtr st ATTRIBUTE_UNUSED, + char *buf, size_t nbytes, void *opaque) +{ + int *fd = opaque; + + return read(*fd, buf, nbytes); +} + +static int +remoteDomainMigrateOffline(virConnectPtr dconn, + const char *name) +{ + int rv = -1, fd = -1; + virStreamPtr st = virStreamNew(dconn, 0); + remote_domain_migrate_offline_args args; + remote_domain_migrate_offline_ret ret; + struct private_data *priv = dconn->privateData; + virNetClientStreamPtr netst = NULL; + + remoteDriverLock(priv); + + args.name = (char *)name; + memset(&ret, 0, sizeof(ret)); + + if (!(netst = virNetClientStreamNew(priv->remoteProgram, REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE, priv->counter))) + goto done; + if (virNetClientAddStream(priv->client, netst) < 0) { + virObjectUnref(netst); + goto done; + } + st->driver = &remoteStreamDrv; + st->privateData = netst; + + if ((fd = open(name, O_RDONLY)) < 0) + goto done; + if (fd == -1) + goto done; + + if (call (dconn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE, + (xdrproc_t) xdr_remote_domain_migrate_offline_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_offline_ret, (char *) &ret) == -1) { + virNetClientRemoveStream(priv->client, netst); + virObjectUnref(netst); + st->driver = NULL; + st->privateData = NULL; + goto done; + } + + remoteDriverUnlock(priv); + + if (virStreamSendAll(st, doRemoteReadFile, &fd) < 0) + goto done; + if (virStreamFinish(st) < 0) + goto done; + if (VIR_CLOSE(fd) < 0) + goto done; + + rv = 0; + +done: + return rv; +} + + static void remoteDomainEventQueue(struct private_data *priv, virDomainEventPtr event) { @@ -5491,6 +5560,7 @@ static virDriver remote_driver = { .domainEventDeregister = remoteDomainEventDeregister, /* 0.5.0 */ .domainMigratePrepare2 = remoteDomainMigratePrepare2, /* 0.5.0 */ .domainMigrateFinish2 = remoteDomainMigrateFinish2, /* 0.5.0 */ + .domainMigrateOffline = remoteDomainMigrateOffline, /* 0.10.1 */ .nodeDeviceDettach = remoteNodeDeviceDettach, /* 0.6.1 */ 
.nodeDeviceReAttach = remoteNodeDeviceReAttach, /* 0.6.1 */ .nodeDeviceReset = remoteNodeDeviceReset, /* 0.6.1 */ diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 085d5d9..c845737 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2558,6 +2558,13 @@ struct remote_connect_list_all_domains_ret { unsigned int ret; };
+struct remote_domain_migrate_offline_args { + remote_nonnull_string name; +}; + +struct remote_domain_migrate_offline_ret { + int retval; +};
/*----- Protocol. -----*/
@@ -2888,7 +2895,8 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_GET_HOSTNAME = 277, /* autogen autogen */ REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL_LIST = 278, /* skipgen skipgen priority:high */ REMOTE_PROC_DOMAIN_PIN_EMULATOR = 279, /* skipgen skipgen */ - REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO = 280 /* skipgen skipgen */ + REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO = 280, /* skipgen skipgen */ + REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE = 281 /* skipgen skipgen priority:low*/
/* * Notice how the entries are grouped in sets of 10 ? diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index dbcaa25..70f7694 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6698,9 +6698,66 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("migration when there's no domain active")}, {NULL, 0, 0, NULL} };
+static int +push_file(char dst[] ATTRIBUTE_UNUSED, char *file, virConnectPtr dconn) +{ + int ret = -1; + + ret = virDomainMigrateOffline(dconn, file); + + return ret; +} + +static void +vshMigrateOffline(vshControl *ctl, char *file, char dst[]) +{ + xmlDocPtr xml = NULL; + xmlXPathContextPtr ctxt = NULL; + xmlNodePtr *disks = NULL; + virConnectPtr dconn = NULL; + int i = 0, ret = 0; + char *src[] = {NULL}; + + if (!vshConnectionUsability(ctl, ctl->conn)) + return; + + xml = virXMLParseFileCtxt(file, &ctxt); + if (!xml) { + vshError(NULL, "%s", _("Fail to get domain information from")); + goto cleanup; + } + + ret = virXPathNodeSet("./devices/disk", ctxt, &disks); + if (ret < 0) { + vshError(NULL, "%s", _("Fail to get disk node")); + goto cleanup; + } + + dconn = virConnectOpen(dst); + if (!dconn) + goto cleanup; + vshPrint(ctl, "pushing %s to %s\n", file, dst); + if (push_file(dst, file, dconn) < 0) + goto cleanup; + for (i = 0 ; i < ret ; i++) { + ctxt->node = disks[i]; + src[i] = virXPathString("string(./source/@file" + "|./source/@dir" + "|./source/@name)", ctxt); + vshPrint(ctl, "pushing %s to %s\n", src[i], dst); + if (push_file(dst, src[i], dconn) < 0) + break; + } + +cleanup: + xmlXPathFreeContext(ctxt); + xmlFreeDoc(xml); +} + static void doMigrate(void *opaque) { @@ -6767,12 +6824,24 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + flags |= VIR_MIGRATE_OFFLINE; + if (xmlfile == NULL) + vshError(ctl, _("please specify xmlfile for offline migration")); + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile); goto out; }
+ if (flags & VIR_MIGRATE_OFFLINE) { + vshMigrateOffline(ctl, (char *)xmlfile, (char *)desturi); + goto out; + } + + if ((flags & VIR_MIGRATE_PEER2PEER) || vshCommandOptBool(cmd, "direct")) { /* For peer2peer migration or direct migration we only expect one URI -- 1.7.2.5
-- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
-- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 08/27/2012 11:20 PM, Daniel P. Berrange wrote:
On Tue, Aug 28, 2012 at 01:38:32PM +0800, liguang wrote:
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
Please provide a full description of what this patch is supposed to be doing and why you've implemented it this way. I struggle to understand what on earth this patch is doing with the streams APIs, nor why we need a new public API for this.
I agree with Daniel that we do not need a new API, but that the existing API is sufficient. Remember, virDomainMigrate will eventually boil down to these RPC transactions if both sides support v3: src: begin - pass the xml to dst (unchanged for offline) dst: prepare - get ready to accept incoming VM (online starts a new qemu process, offline migration has nothing to do beyond defining the xml just received) src: perform - start the migration (online triggers the migration of vm state; offline migration has nothing to do since there is no vm state) dst: finish - wait for completion and kill on failure (online continues the qemu process to run the vm, offline has nothing further to do) src: confirm - final cleanup (unchanged for offline) That is, I think you should modify the existing APIs to delete their current check that does an early exit for an offline domain, and instead handle offline domains by migrating the XML definition. If you are also trying to allow migration of non-shared disk images (that is, implement the 'virsh migrate --copy-storage-all' flag) for offline VMs, that would be where you have to use virStream under the hood, since you no longer have qemu doing it on your behalf. But set up that stream and coordinate its progress using the 5 existing steps of v3 migration, not by creating a new API. Furthermore, recall that the existing qemu implementation of live migration of storage alongside vm state is considered deprecated, and that for qemu 1.3, we already have to use different methodologies to continue to support the same top-level semantics of a --copy-storage-all flag (most likely, by libvirt setting up the source to be an NBD server, the destination to connect to that NBD server as a backing file, then do a block pull, and finally breaking the NBD connection down when everything is migrated). 
Depending on how things go with upstream qemu on this design change, it may impact how we do live migration of non-shared storage, and we should be looking at how best to share the work between both online and offline migration. -- Eric Blake eblake@redhat.com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

在 2012-08-27一的 23:38 -0700,Eric Blake写道:
On 08/27/2012 11:20 PM, Daniel P. Berrange wrote:
On Tue, Aug 28, 2012 at 01:38:32PM +0800, liguang wrote:
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
Please provide a full description of what this patch is supposed to be doing and why you've implemented it this way. I struggle to understand what on earth this patch is doing with the streams APIs, nor why we need a new public API for this.
I agree with Daniel that we do not need a new API, but that the existing API is sufficient. Remember, virDomainMigrate will eventually boil down to these RPC transactions if both sides support v3:
src: begin - pass the xml to dst (unchanged for offline) dst: prepare - get ready to accept incoming VM (online starts a new qemu
but how can dst know whether the prepared migration is offline or not at src? virDomainObjIsActive will always return 0 at dst during the prepare phase, so how can we set up a distinguished stream for offline migration here? A special parameter for this purpose? Roughly speaking, a modified API amounts to the same thing as a new API, I think.
process, offline migration has nothing to do beyond defining the xml just received) src: perform - start the migration (online triggers the migration of vm state; offline migration has nothing to do since there is no vm state) dst: finish - wait for completion and kill on failure (online continues the qemu process to run the vm, offline has nothing further to do) src: confirm - final cleanup (unchanged for offline)
That is, I think you should modify the existing APIs to delete their current check that does an early exit for an offline domain, and instead handle offline domains by migrating the XML definition. If you are also trying to allow migration of non-shared disk images (that is, implement the 'virsh migrate --copy-storage-all' flag) for offline VMs, that would be where you have to use virStream under the hood, since you no longer have qemu doing it on your behalf. But set up that stream and coordinate its progress using the 5 existing steps of v3 migration, not by creating a new API.
Furthermore, recall that the existing qemu implementation of live migration of storage alongside vm state is considered deprecated, and that for qemu 1.3, we already have to use different methodologies to continue to support the same top-level semantics of a --copy-storage-all flag (most likely, by libvirt setting up the source to be an NBD server, the destination to connect to that NBD server as a backing file, then do a block pull, and finally breaking the NBD connection down when everything is migrated). Depending on how things to with upstream qemu on this design change, it may impact how we do live migration of non-shared storage, and we should be looking at how best to share the work between both online and offline migration.
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team

On 08/28/2012 02:21 AM, liguang wrote:
src: begin - pass the xml to dst (unchanged for offline) dst: prepare - get ready to accept incoming VM (online starts a new qemu
but how can dst know whether the prepared migration is offline or not at src?
By the cookie that was passed in from src from the begin phase. That is, you need to expand the cookie to pass one more piece of information - whether this is a live or offline migration.
virDomainObjIsActive will always return 0 at dst at prepare phase, so, how can we setup a distinguished stream for offline migration here? a special parameter for this purpose? roughly way, a modified API same with a new API, I think.
No, we do not need a new API. -- Eric Blake eblake@redhat.com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

在 2012-08-27一的 23:38 -0700,Eric Blake写道:
On 08/27/2012 11:20 PM, Daniel P. Berrange wrote:
On Tue, Aug 28, 2012 at 01:38:32PM +0800, liguang wrote:
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
Please provide a full description of what this patch is supposed to be doing and why you've implemented it this way. I struggle to understand what on earth this patch is doing with the streams APIs, nor why we need a new public API for this.
I agree with Daniel that we do not need a new API, but that the existing API is sufficient. Remember, virDomainMigrate will eventually boil down to these RPC transactions if both sides support v3:
src: begin - pass the xml to dst (unchanged for offline) dst: prepare - get ready to accept incoming VM (online starts a new qemu process, offline migration has nothing to do beyond defining the xml just received) src: perform - start the migration (online triggers the migration of vm state; offline migration has nothing to do since there is no vm state) dst: finish - wait for completion and kill on failure (online continues the qemu process to run the vm, offline has nothing further to do) src: confirm - final cleanup (unchanged for offline)
That is, I think you should modify the existing APIs to delete their current check that does an early exit for an offline domain, and instead handle offline domains by migrating the XML definition. If you are also trying to allow migration of non-shared disk images (that is, implement the 'virsh migrate --copy-storage-all' flag) for offline VMs, that would be where you have to use virStream under the hood, since you no longer have qemu doing it on your behalf. But set up that stream and
Yes, maybe, but as I see it, MigratePrepareTunnel3 can only handle 1 stream per call, so only 1 file can be transferred; trying to migrate multiple files will be very hard. E.g., use a cookie to pass file info to the remote side, where doPeer2PeerMigrate3 may create a single thread to parse the file info and set up streams for data transfer — but it has to exit before we do the real stream sending, so the new thread will become a zombie, won't it? That's my concern.
coordinate its progress using the 5 existing steps of v3 migration, not by creating a new API.
Furthermore, recall that the existing qemu implementation of live migration of storage alongside vm state is considered deprecated, and that for qemu 1.3, we already have to use different methodologies to continue to support the same top-level semantics of a --copy-storage-all flag (most likely, by libvirt setting up the source to be an NBD server, the destination to connect to that NBD server as a backing file, then do a block pull, and finally breaking the NBD connection down when everything is migrated). Depending on how things to with upstream qemu on this design change, it may impact how we do live migration of non-shared storage, and we should be looking at how best to share the work between both online and offline migration.
-- liguang lig.fnst@cn.fujitsu.com FNST linux kernel team

Actually, I've sent some description before. As it stands, as far as I know, when a domain is only defined but not active (so-called offline migration), someone may want the domain to become active on another remote server rather than the local one, and libvirt can't do that. So I tried to construct a kludge to serve this need; it implements a new API named virDomainMigrateOffline, because it's hard to add a code snippet to virDomainMigrate and its following code path to support offline migration with the remote protocol. My kludge will transfer the data the domain requires to the remote server by stream, so it has nothing to do with running VMs. 在 2012-08-27一的 23:20 -0700,Daniel P. Berrange写道:
On Tue, Aug 28, 2012 at 01:38:32PM +0800, liguang wrote:
Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
Please provide a full description of what this patch is supposed to be doing and why you've implemented it this way. I struggle to understand what on earth this patch is doing with the streams APIs, nor why we need a new public API for this.
Daniel
--- daemon/remote.c | 46 +++++++++++++++++++++++++++ docs/hvsupport.pl | 2 + include/libvirt/libvirt.h.in | 6 +++ python/generator.py | 1 + src/driver.h | 5 +++ src/libvirt.c | 22 +++++++++++++ src/libvirt_public.syms | 1 + src/remote/remote_driver.c | 70 ++++++++++++++++++++++++++++++++++++++++++ src/remote/remote_protocol.x | 10 +++++- tools/virsh-domain.c | 69 +++++++++++++++++++++++++++++++++++++++++ 10 files changed, 231 insertions(+), 1 deletions(-)
diff --git a/daemon/remote.c b/daemon/remote.c index 24928f4..c47a580 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -21,6 +21,9 @@ */
#include <config.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h>
#include "virterror_internal.h"
@@ -48,6 +51,7 @@ #include "virdbus.h" #include "remote_protocol.h" #include "qemu_protocol.h" +#include "fdstream.h"
#define VIR_FROM_THIS VIR_FROM_RPC @@ -1768,6 +1772,48 @@ no_memory: goto cleanup; }
+static int remoteDispatchDomainMigrateOffline( + virNetServerPtr server ATTRIBUTE_UNUSED, + virNetServerClientPtr client, + virNetMessagePtr msg ATTRIBUTE_UNUSED, + virNetMessageErrorPtr rerr, + remote_domain_migrate_offline_args *args, + remote_domain_migrate_offline_ret *ret ATTRIBUTE_UNUSED) +{ + int rv = -1; + virStreamPtr st = NULL; + daemonClientStreamPtr stream = NULL; + daemonClientPrivatePtr priv = + virNetServerClientGetPrivateData(client); + + if (!priv->conn) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + st = virStreamNew(priv->conn, VIR_STREAM_NONBLOCK); + + if (!(stream = daemonCreateClientStream(client, st, remoteProgram, &msg->header))) + goto cleanup; + + if (virFDStreamCreateFile(st, + args->name, + 0, 0, + O_WRONLY, 0) < 0) + goto cleanup; + + + if (daemonAddClientStream(client, stream, false) < 0) + goto cleanup; + + rv = 0; + +cleanup: + if (rv < 0) + virNetMessageSaveError(rerr); + return rv; +} + static int remoteDispatchDomainMigratePrepare(virNetServerPtr server ATTRIBUTE_UNUSED, virNetServerClientPtr client ATTRIBUTE_UNUSED, diff --git a/docs/hvsupport.pl b/docs/hvsupport.pl index 4871739..47fc505 100755 --- a/docs/hvsupport.pl +++ b/docs/hvsupport.pl @@ -128,6 +128,8 @@ $apis{virDomainMigratePrepareTunnel3} = "0.9.2"; $apis{virDomainMigratePerform3} = "0.9.2"; $apis{virDomainMigrateFinish3} = "0.9.2"; $apis{virDomainMigrateConfirm3} = "0.9.2"; +$apis{virDomainMigrateOffline} = "0.10.1"; +
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index cfe5047..7c9cf3c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -995,6 +995,7 @@ typedef enum { * whole migration process; this will be used automatically * when supported */ VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */ + VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migration */ } virDomainMigrateFlags;
/* Domain migration. */ @@ -1030,6 +1031,11 @@ int virDomainMigrateGetMaxSpeed(virDomainPtr domain, unsigned long *bandwidth, unsigned int flags);
+int +virDomainMigrateOffline(virConnectPtr dconn, + char *file); + + /** * VIR_NODEINFO_MAXCPUS: * @nodeinfo: virNodeInfo instance diff --git a/python/generator.py b/python/generator.py index 7beb361..a1b1203 100755 --- a/python/generator.py +++ b/python/generator.py @@ -427,6 +427,7 @@ skip_impl = ( 'virDomainGetDiskErrors', 'virConnectUnregisterCloseCallback', 'virConnectRegisterCloseCallback', + 'virDomainMigrateOffline', )
qemu_skip_impl = ( diff --git a/src/driver.h b/src/driver.h index e88ab28..9041005 100644 --- a/src/driver.h +++ b/src/driver.h @@ -881,6 +881,10 @@ typedef char * int type, const char *uri, unsigned int flags); +typedef int + (*virDrvDomainMigrateOffline)(virConnectPtr dconn, + const char *file); +
/** * _virDriver: @@ -1068,6 +1072,7 @@ struct _virDriver { virDrvDomainGetDiskErrors domainGetDiskErrors; virDrvDomainSetMetadata domainSetMetadata; virDrvDomainGetMetadata domainGetMetadata; + virDrvDomainMigrateOffline domainMigrateOffline; };
typedef int diff --git a/src/libvirt.c b/src/libvirt.c index b034ed6..2878384 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -5001,6 +5001,28 @@ virDomainMigratePeer2Peer (virDomainPtr domain, } }
+/** + * virDomainMigrateOffline: + * @dconn: target connection handler + * @file: the file to push to target + * + * to handle offline migration + * Returns -1 if error, else 0 + */ +int +virDomainMigrateOffline(virConnectPtr dconn, + char *file) +{ + VIR_DEBUG("dconn=%p, file=%s", dconn, NULLSTR(file)); + + if (!VIR_IS_CONNECT (dconn)) { + virLibConnError(VIR_ERR_INVALID_CONN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + return dconn->driver->domainMigrateOffline(dconn, file); +}
/* * In normal migration, the libvirt client co-ordinates communication diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 92ae95a..e6a7de7 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -550,6 +550,7 @@ LIBVIRT_0.10.0 { virConnectRegisterCloseCallback; virConnectUnregisterCloseCallback; virDomainGetSecurityLabelList; + virDomainMigrateOffline; virDomainPinEmulator; virDomainGetEmulatorPinInfo; } LIBVIRT_0.9.13; diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index cf1f079..0952783 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -22,8 +22,12 @@ */
#include <config.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h>
#include <unistd.h> +#include <stdio.h> #include <assert.h>
#include "virnetclient.h" @@ -5247,6 +5251,71 @@ done: return rv; }
+static int +doRemoteReadFile(virStreamPtr st ATTRIBUTE_UNUSED, + char *buf, size_t nbytes, void *opaque) +{ + int *fd = opaque; + + return read(*fd, buf, nbytes); +} + +static int +remoteDomainMigrateOffline(virConnectPtr dconn, + const char *name) +{ + int rv = -1, fd = -1; + virStreamPtr st = virStreamNew(dconn, 0); + remote_domain_migrate_offline_args args; + remote_domain_migrate_offline_ret ret; + struct private_data *priv = dconn->privateData; + virNetClientStreamPtr netst = NULL; + + remoteDriverLock(priv); + + args.name = (char *)name; + memset(&ret, 0, sizeof(ret)); + + if (!(netst = virNetClientStreamNew(priv->remoteProgram, REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE, priv->counter))) + goto done; + if (virNetClientAddStream(priv->client, netst) < 0) { + virObjectUnref(netst); + goto done; + } + st->driver = &remoteStreamDrv; + st->privateData = netst; + + if ((fd = open(name, O_RDONLY)) < 0) + goto done; + if (fd == -1) + goto done; + + if (call (dconn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE, + (xdrproc_t) xdr_remote_domain_migrate_offline_args, (char *) &args, + (xdrproc_t) xdr_remote_domain_migrate_offline_ret, (char *) &ret) == -1) { + virNetClientRemoveStream(priv->client, netst); + virObjectUnref(netst); + st->driver = NULL; + st->privateData = NULL; + goto done; + } + + remoteDriverUnlock(priv); + + if (virStreamSendAll(st, doRemoteReadFile, &fd) < 0) + goto done; + if (virStreamFinish(st) < 0) + goto done; + if (VIR_CLOSE(fd) < 0) + goto done; + + rv = 0; + +done: + return rv; +} + + static void remoteDomainEventQueue(struct private_data *priv, virDomainEventPtr event) { @@ -5491,6 +5560,7 @@ static virDriver remote_driver = { .domainEventDeregister = remoteDomainEventDeregister, /* 0.5.0 */ .domainMigratePrepare2 = remoteDomainMigratePrepare2, /* 0.5.0 */ .domainMigrateFinish2 = remoteDomainMigrateFinish2, /* 0.5.0 */ + .domainMigrateOffline = remoteDomainMigrateOffline, /* 0.10.1 */ .nodeDeviceDettach = remoteNodeDeviceDettach, /* 0.6.1 */ 
.nodeDeviceReAttach = remoteNodeDeviceReAttach, /* 0.6.1 */ .nodeDeviceReset = remoteNodeDeviceReset, /* 0.6.1 */ diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 085d5d9..c845737 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2558,6 +2558,13 @@ struct remote_connect_list_all_domains_ret { unsigned int ret; };
+struct remote_domain_migrate_offline_args { + remote_nonnull_string name; +}; + +struct remote_domain_migrate_offline_ret { + int retval; +};
/*----- Protocol. -----*/
@@ -2888,7 +2895,8 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_GET_HOSTNAME = 277, /* autogen autogen */ REMOTE_PROC_DOMAIN_GET_SECURITY_LABEL_LIST = 278, /* skipgen skipgen priority:high */ REMOTE_PROC_DOMAIN_PIN_EMULATOR = 279, /* skipgen skipgen */ - REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO = 280 /* skipgen skipgen */ + REMOTE_PROC_DOMAIN_GET_EMULATOR_PIN_INFO = 280, /* skipgen skipgen */ + REMOTE_PROC_DOMAIN_MIGRATE_OFFLINE = 281 /* skipgen skipgen priority:low*/
/* * Notice how the entries are grouped in sets of 10 ? diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index dbcaa25..70f7694 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -6698,9 +6698,66 @@ static const vshCmdOptDef opts_migrate[] = { {"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")}, {"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")}, {"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")}, + {"offline", VSH_OT_BOOL, 0, N_("migration when there's no domain active")}, {NULL, 0, 0, NULL} };
+static int +push_file(char dst[] ATTRIBUTE_UNUSED, char *file, virConnectPtr dconn) +{ + int ret = -1; + + ret = virDomainMigrateOffline(dconn, file); + + return ret; +} + +static void +vshMigrateOffline(vshControl *ctl, char *file, char dst[]) +{ + xmlDocPtr xml = NULL; + xmlXPathContextPtr ctxt = NULL; + xmlNodePtr *disks = NULL; + virConnectPtr dconn = NULL; + int i = 0, ret = 0; + char *src[] = {NULL}; + + if (!vshConnectionUsability(ctl, ctl->conn)) + return; + + xml = virXMLParseFileCtxt(file, &ctxt); + if (!xml) { + vshError(NULL, "%s", _("Fail to get domain information from")); + goto cleanup; + } + + ret = virXPathNodeSet("./devices/disk", ctxt, &disks); + if (ret < 0) { + vshError(NULL, "%s", _("Fail to get disk node")); + goto cleanup; + } + + dconn = virConnectOpen(dst); + if (!dconn) + goto cleanup; + vshPrint(ctl, "pushing %s to %s\n", file, dst); + if (push_file(dst, file, dconn) < 0) + goto cleanup; + for (i = 0 ; i < ret ; i++) { + ctxt->node = disks[i]; + src[i] = virXPathString("string(./source/@file" + "|./source/@dir" + "|./source/@name)", ctxt); + vshPrint(ctl, "pushing %s to %s\n", src[i], dst); + if (push_file(dst, src[i], dconn) < 0) + break; + } + +cleanup: + xmlXPathFreeContext(ctxt); + xmlFreeDoc(xml); +} + static void doMigrate(void *opaque) { @@ -6767,12 +6824,24 @@ doMigrate(void *opaque) if (vshCommandOptBool(cmd, "unsafe")) flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) { + flags |= VIR_MIGRATE_OFFLINE; + if (xmlfile == NULL) + vshError(ctl, _("please specify xmlfile for offline migration")); + } + if (xmlfile && virFileReadAll(xmlfile, 8192, &xml) < 0) { vshError(ctl, _("file '%s' doesn't exist"), xmlfile); goto out; }
+ if (flags & VIR_MIGRATE_OFFLINE) { + vshMigrateOffline(ctl, (char *)xmlfile, (char *)desturi); + goto out; + } + + if ((flags & VIR_MIGRATE_PEER2PEER) || vshCommandOptBool(cmd, "direct")) { /* For peer2peer migration or direct migration we only expect one URI -- 1.7.2.5
-- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
participants (3)
-
Daniel P. Berrange
-
Eric Blake
-
liguang