This allows:
a) migration without access to network
b) complete control of the migration stream
c) easy migration between containerised libvirt daemons on the same host
Resolves:
https://bugzilla.redhat.com/1638889
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
---
docs/manpages/virsh.rst | 17 ++++-
docs/migration.html.in | 33 ++++++++++
src/qemu/qemu_migration.c | 128 +++++++++++++++++++++++++++-----------
3 files changed, 139 insertions(+), 39 deletions(-)
diff --git a/docs/manpages/virsh.rst b/docs/manpages/virsh.rst
index cbb3c18deb30..82f7c9f77488 100644
--- a/docs/manpages/virsh.rst
+++ b/docs/manpages/virsh.rst
@@ -3237,12 +3237,12 @@ has different semantics:
In a special circumstance where you require a complete control of the connection
and/or libvirt does not have network access to the remote side you can use a
-unix transport in the URI and specify a socket path in the query, for example
+UNIX transport in the URI and specify a socket path in the query, for example
with the qemu driver you could use this:
.. code-block::
- qemu+unix://?socket=/path/to/socket
+ qemu+unix:///system?socket=/path/to/socket
When *migrateuri* is not specified, libvirt will automatically determine the
hypervisor specific URI. Some hypervisors, including QEMU, have an optional
@@ -3270,6 +3270,14 @@ There are a few scenarios where specifying *migrateuri* may help:
might be specified to choose a specific port number outside the default range in
order to comply with local firewall policies.
+* The *desturi* uses the UNIX transport method. In this advanced case libvirt
+ should not guess a *migrateuri* and it should be specified using a
+ UNIX socket path URI:
+
+.. code-block::
+
+ unix://?socket=/path/to/socket
+
See `https://libvirt.org/migration.html#uris <https://libvirt.org/migration.html#uris>`_ for more details on migration URIs.
@@ -3296,7 +3304,10 @@ specific parameters separated by '&'. Currently recognized parameters are
Optional *listen-address* sets the listen address that hypervisor on the
destination side should bind to for incoming migration. Both IPv4 and IPv6
addresses are accepted as well as hostnames (the resolving is done on
-destination). Some hypervisors do not support this feature and will return an
+destination). In niche scenarios you can also use a UNIX socket to make the
+hypervisor connection over a UNIX socket, in which case you must make sure the
+source can connect to the destination using the socket path you provided.
+Some hypervisors do not support specifying the listen address and will return an
error if this parameter is used.
Optional *disks-port* sets the port that hypervisor on destination side should
diff --git a/docs/migration.html.in b/docs/migration.html.in
index e95ee9de6f1b..9c8417674b22 100644
--- a/docs/migration.html.in
+++ b/docs/migration.html.in
@@ -201,6 +201,9 @@
numbers. In the latter case the management application may wish
to choose a specific port number outside the default range in order
to comply with local firewall policies.</li>
+ <li>The second URI uses the UNIX transport method. In this advanced case
+ libvirt should not guess a <i>migrateuri</i> and it should be specified
+ using a UNIX socket path URI:
+ <code>unix://?socket=/path/to/socket</code>.</li>
</ol>
<h2><a id="config">Configuration file
handling</a></h2>
@@ -628,5 +631,35 @@ virsh migrate --p2p --tunnelled web1 qemu+ssh://desthost/system qemu+ssh://10.0.
Supported by QEMU driver
</p>
+
+ <h3><a id="scenariounixsocket">Migration using only UNIX sockets</a></h3>
+
+ <p>
+ In a niche scenario where the libvirt daemon does not have access to the
+ network (e.g. when running in a restricted container on a host that has
+ network access), when a management application wants to have complete
+ control over the transfer, or when migrating between two containers on the
+ same host, all the communication can be done using UNIX sockets. This
+ includes connecting to a non-standard socket path for the destination
+ daemon, using UNIX sockets for the hypervisor's communication or for the NBD
+ data transfer. All of that can be used with both peer2peer and direct
+ migration options.
+ </p>
+
+ <p>
+ Example using <code>/tmp/migdir</code> as a directory representing the
+ same path visible from both libvirt daemons. That can be achieved by
+ bind-mounting the same directory to different containers running separate
+ daemons or forwarding connections to these sockets manually
+ (using <code>socat</code>, <code>netcat</code> or a custom piece of software):
+ <pre>
+virsh migrate web1 [--p2p] --copy-storage-all 'qemu+unix:///system?socket=/tmp/migdir/test-sock-driver' 'unix://?socket=/tmp/migdir/test-sock-qemu' [--listen-address /tmp/migdir/test-sock-qemu] --disks-socket /tmp/migdir/test-sock-nbd
+ </pre>
+
+ <p>
+ Supported by QEMU driver
+ </p>
+
</body>
</html>
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 3f4690f8fb72..1158d152869c 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2943,34 +2943,41 @@ qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
}
if (STRNEQ(uri->scheme, "tcp") &&
- STRNEQ(uri->scheme, "rdma")) {
+ STRNEQ(uri->scheme, "rdma") &&
+ STRNEQ(uri->scheme, "unix")) {
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
_("unsupported scheme %s in migration URI %s"),
uri->scheme, uri_in);
goto cleanup;
}
- if (uri->server == NULL) {
- virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
- " URI: %s"), uri_in);
- goto cleanup;
- }
-
- if (uri->port == 0) {
- if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
+ if (STREQ(uri->scheme, "unix")) {
+ autoPort = false;
+ if (!listenAddress)
+ listenAddress = virURIGetParam(uri, "socket");
+ } else {
+ if (uri->server == NULL) {
+ virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
+ " URI: %s"), uri_in);
goto cleanup;
+ }
- /* Send well-formed URI only if uri_in was well-formed */
- if (well_formed_uri) {
- uri->port = port;
- if (!(*uri_out = virURIFormat(uri)))
+ if (uri->port == 0) {
+ if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
goto cleanup;
+
+ /* Send well-formed URI only if uri_in was well-formed */
+ if (well_formed_uri) {
+ uri->port = port;
+ if (!(*uri_out = virURIFormat(uri)))
+ goto cleanup;
+ } else {
+ *uri_out = g_strdup_printf("%s:%d", uri_in, port);
+ }
} else {
- *uri_out = g_strdup_printf("%s:%d", uri_in, port);
+ port = uri->port;
+ autoPort = false;
}
- } else {
- port = uri->port;
- autoPort = false;
}
}
@@ -3185,6 +3192,7 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
enum qemuMigrationDestinationType {
MIGRATION_DEST_HOST,
MIGRATION_DEST_CONNECT_HOST,
+ MIGRATION_DEST_CONNECT_SOCKET,
MIGRATION_DEST_FD,
};
@@ -3204,6 +3212,10 @@ struct _qemuMigrationSpec {
int port;
} host;
+ struct {
+ const char *path;
+ } socket;
+
struct {
int qemu;
int local;
@@ -3418,13 +3430,29 @@ qemuMigrationSrcConnect(virQEMUDriverPtr driver,
if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
- port = g_strdup_printf("%d", spec->dest.host.port);
- if (virNetSocketNewConnectTCP(spec->dest.host.name,
- port,
- AF_UNSPEC,
- &sock) == 0) {
- fd_qemu = virNetSocketDupFD(sock, true);
- virObjectUnref(sock);
+
+ switch (spec->destType) {
+ case MIGRATION_DEST_CONNECT_HOST:
+ port = g_strdup_printf("%d", spec->dest.host.port);
+ if (virNetSocketNewConnectTCP(spec->dest.host.name,
+ port,
+ AF_UNSPEC,
+ &sock) == 0) {
+ fd_qemu = virNetSocketDupFD(sock, true);
+ virObjectUnref(sock);
+ }
+ break;
+ case MIGRATION_DEST_CONNECT_SOCKET:
+ if (virNetSocketNewConnectUNIX(spec->dest.socket.path,
+ false, NULL,
+ &sock) == 0) {
+ fd_qemu = virNetSocketDupFD(sock, true);
+ virObjectUnref(sock);
+ }
+ break;
+ case MIGRATION_DEST_HOST:
+ case MIGRATION_DEST_FD:
+ break;
}
spec->destType = MIGRATION_DEST_FD;
@@ -3632,6 +3660,14 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
if (mig->nbd) {
+ const char *host = "";
+
+ if (spec->destType == MIGRATION_DEST_HOST ||
+ spec->destType == MIGRATION_DEST_CONNECT_HOST) {
+ host = spec->dest.host.name;
+ }
+
+
/* Currently libvirt does not support setting up of the NBD
* non-shared storage migration with TLS. As we need to honour the
* VIR_MIGRATE_TLS flag, we need to reject such migration until
@@ -3645,7 +3681,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
/* This will update migrate_flags on success */
if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
- spec->dest.host.name,
+ host,
migrate_speed,
&migrate_flags,
nmigrate_disks,
@@ -3693,7 +3729,8 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
goto exit_monitor;
/* connect to the destination qemu if needed */
- if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
+ if ((spec->destType == MIGRATION_DEST_CONNECT_HOST ||
+ spec->destType == MIGRATION_DEST_CONNECT_SOCKET) &&
qemuMigrationSrcConnect(driver, vm, spec) < 0) {
goto exit_monitor;
}
@@ -3716,6 +3753,7 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
break;
case MIGRATION_DEST_CONNECT_HOST:
+ case MIGRATION_DEST_CONNECT_SOCKET:
/* handled above and transformed into MIGRATION_DEST_FD */
break;
@@ -3931,16 +3969,34 @@ qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
}
}
- /* RDMA and multi-fd migration requires QEMU to connect to the destination
- * itself.
- */
- if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
- spec.destType = MIGRATION_DEST_HOST;
- else
- spec.destType = MIGRATION_DEST_CONNECT_HOST;
- spec.dest.host.protocol = uribits->scheme;
- spec.dest.host.name = uribits->server;
- spec.dest.host.port = uribits->port;
+ if (STREQ(uribits->scheme, "unix")) {
+ if (flags & VIR_MIGRATE_TLS) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("Migration over UNIX socket with TLS is not supported"));
+ return -1;
+ }
+ if (flags & VIR_MIGRATE_PARALLEL) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("Parallel migration over UNIX socket is not supported"));
+ return -1;
+ }
+
+ spec.destType = MIGRATION_DEST_CONNECT_SOCKET;
+ spec.dest.socket.path = virURIGetParam(uribits, "socket");
+ } else {
+ /* RDMA and multi-fd migration requires QEMU to connect to the destination
+ * itself.
+ */
+ if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
+ spec.destType = MIGRATION_DEST_HOST;
+ else
+ spec.destType = MIGRATION_DEST_CONNECT_HOST;
+
+ spec.dest.host.protocol = uribits->scheme;
+ spec.dest.host.name = uribits->server;
+ spec.dest.host.port = uribits->port;
+ }
+
spec.fwdType = MIGRATION_FWD_DIRECT;
ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
--
2.28.0