[libvirt] [PATCH v2] rpc: Remove keepalive_required option
by Martin Kletzander
Since its introduction in 2011 (particularly in commit f4324e329275),
the option has never worked. It just effectively disables all incoming
connections. That's because the client private data that contain the
'keepalive_supported' boolean are initialized to zeroes, so the bool is
false, and the only other place where the bool is used is when checking
whether the client supports keepalive. Thus, according to the server,
no client supports keepalive.
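A minimal standalone sketch of the broken interaction (illustrative
only, with names shortened; not the actual libvirt sources):

#include <stdbool.h>
#include <stdlib.h>

struct clientPrivate {
    bool keepalive_supported;   /* never set before ConnectOpen runs */
};

/* client private data is zero-allocated, so the flag starts out false */
static void *clientPrivNew(void)
{
    return calloc(1, sizeof(struct clientPrivate));
}

/* with keepalive_required enabled every client hits this branch,
 * because nothing has had a chance to flip the flag yet */
static int dispatchConnectOpen(bool keepalive_required,
                               struct clientPrivate *priv)
{
    if (keepalive_required && !priv->keepalive_supported)
        return -1;   /* "keepalive support is required to connect" */
    return 0;
}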
Removing this instead of fixing it is better because a) apparently
nobody has ever tried it since 2011 (one month short of 4 years) and
b) we cannot know whether the client supports keepalive until we get a
ping or pong keepalive packet, and that won't happen until after we
have dispatched the ConnectOpen call.
Another two reasons would be c) the keepalive_required was tracked on
the server level, but keepalive_supported was in private data of the
client as well as the check that was made in the remote layer, thus
making all other instances of virNetServer miss this feature unless they
all implemented it for themselves and d) we can always add it back in
case there is a request and a use-case for it.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
---
v2:
- Added a new daemontest file with current input/output.
daemon/libvirtd-config.c | 4 ----
daemon/libvirtd-config.h | 2 --
daemon/libvirtd.aug | 2 --
daemon/libvirtd.c | 2 --
daemon/libvirtd.conf | 7 ------
daemon/libvirtd.h | 1 -
daemon/remote.c | 8 +------
daemon/test_libvirtd.aug.in | 2 --
src/libvirt_remote.syms | 1 -
src/locking/lock_daemon.c | 2 +-
src/lxc/lxc_controller.c | 2 +-
src/rpc/virnetserver.c | 25 +---------------------
src/rpc/virnetserver.h | 3 ---
....json => input-data-no-keepalive-required.json} | 2 --
.../virnetdaemondata/output-data-admin-nomdns.json | 2 --
.../virnetdaemondata/output-data-anon-clients.json | 1 -
.../output-data-initial-nomdns.json | 1 -
tests/virnetdaemondata/output-data-initial.json | 1 -
...json => output-data-no-keepalive-required.json} | 2 --
tests/virnetdaemontest.c | 2 +-
20 files changed, 5 insertions(+), 67 deletions(-)
copy tests/virnetdaemondata/{input-data-admin-nomdns.json => input-data-no-keepalive-required.json} (96%)
copy tests/virnetdaemondata/{input-data-admin-nomdns.json => output-data-no-keepalive-required.json} (96%)
diff --git a/daemon/libvirtd-config.c b/daemon/libvirtd-config.c
index 10dcc423d2db..c31c8b2e9ab5 100644
--- a/daemon/libvirtd-config.c
+++ b/daemon/libvirtd-config.c
@@ -292,7 +292,6 @@ daemonConfigNew(bool privileged ATTRIBUTE_UNUSED)
data->keepalive_interval = 5;
data->keepalive_count = 5;
- data->keepalive_required = 0;
data->admin_min_workers = 5;
data->admin_max_workers = 20;
@@ -302,7 +301,6 @@ daemonConfigNew(bool privileged ATTRIBUTE_UNUSED)
data->admin_keepalive_interval = 5;
data->admin_keepalive_count = 5;
- data->admin_keepalive_required = 0;
localhost = virGetHostname();
if (localhost == NULL) {
@@ -471,11 +469,9 @@ daemonConfigLoadOptions(struct daemonConfig *data,
GET_CONF_INT(conf, filename, keepalive_interval);
GET_CONF_UINT(conf, filename, keepalive_count);
- GET_CONF_UINT(conf, filename, keepalive_required);
GET_CONF_INT(conf, filename, admin_keepalive_interval);
GET_CONF_UINT(conf, filename, admin_keepalive_count);
- GET_CONF_UINT(conf, filename, admin_keepalive_required);
return 0;
diff --git a/daemon/libvirtd-config.h b/daemon/libvirtd-config.h
index 9cdae1a0cb59..3e1971d67f05 100644
--- a/daemon/libvirtd-config.h
+++ b/daemon/libvirtd-config.h
@@ -81,7 +81,6 @@ struct daemonConfig {
int keepalive_interval;
unsigned int keepalive_count;
- int keepalive_required;
int admin_min_workers;
int admin_max_workers;
@@ -91,7 +90,6 @@ struct daemonConfig {
int admin_keepalive_interval;
unsigned int admin_keepalive_count;
- int admin_keepalive_required;
};
diff --git a/daemon/libvirtd.aug b/daemon/libvirtd.aug
index a70aa1dddf90..7c7992dd0568 100644
--- a/daemon/libvirtd.aug
+++ b/daemon/libvirtd.aug
@@ -79,11 +79,9 @@ module Libvirtd =
let keepalive_entry = int_entry "keepalive_interval"
| int_entry "keepalive_count"
- | bool_entry "keepalive_required"
let admin_keepalive_entry = int_entry "admin_keepalive_interval"
| int_entry "admin_keepalive_count"
- | bool_entry "admin_keepalive_required"
let misc_entry = str_entry "host_uuid"
diff --git a/daemon/libvirtd.c b/daemon/libvirtd.c
index 71db4a042c7f..250094bd21dd 100644
--- a/daemon/libvirtd.c
+++ b/daemon/libvirtd.c
@@ -1389,7 +1389,6 @@ int main(int argc, char **argv) {
config->max_anonymous_clients,
config->keepalive_interval,
config->keepalive_count,
- !!config->keepalive_required,
config->mdns_adv ? config->mdns_name : NULL,
remoteClientInitHook,
NULL,
@@ -1464,7 +1463,6 @@ int main(int argc, char **argv) {
0,
config->admin_keepalive_interval,
config->admin_keepalive_count,
- !!config->admin_keepalive_required,
NULL,
remoteAdmClientInitHook,
NULL,
diff --git a/daemon/libvirtd.conf b/daemon/libvirtd.conf
index ac06cdd79103..cd990b28f744 100644
--- a/daemon/libvirtd.conf
+++ b/daemon/libvirtd.conf
@@ -440,14 +440,7 @@
#
#keepalive_interval = 5
#keepalive_count = 5
-#
-# If set to 1, libvirtd will refuse to talk to clients that do not
-# support keepalive protocol. Defaults to 0.
-#
-#keepalive_required = 1
# Keepalive settings for the admin interface
#admin_keepalive_interval = 5
#admin_keepalive_count = 5
-#
-#admin_keepalive_required = 1
diff --git a/daemon/libvirtd.h b/daemon/libvirtd.h
index 8c1a904893ab..efd4823ae18e 100644
--- a/daemon/libvirtd.h
+++ b/daemon/libvirtd.h
@@ -72,7 +72,6 @@ struct daemonClientPrivate {
virConnectPtr conn;
daemonClientStreamPtr streams;
- bool keepalive_supported;
};
/* Separate private data for admin connection */
diff --git a/daemon/remote.c b/daemon/remote.c
index e9e2dcae80e0..3a3eb0913088 100644
--- a/daemon/remote.c
+++ b/daemon/remote.c
@@ -1290,7 +1290,7 @@ void *remoteClientInitHook(virNetServerClientPtr client,
/*----- Functions. -----*/
static int
-remoteDispatchConnectOpen(virNetServerPtr server,
+remoteDispatchConnectOpen(virNetServerPtr server ATTRIBUTE_UNUSED,
virNetServerClientPtr client,
virNetMessagePtr msg ATTRIBUTE_UNUSED,
virNetMessageErrorPtr rerr,
@@ -1309,12 +1309,6 @@ remoteDispatchConnectOpen(virNetServerPtr server,
goto cleanup;
}
- if (virNetServerKeepAliveRequired(server) && !priv->keepalive_supported) {
- virReportError(VIR_ERR_OPERATION_FAILED, "%s",
- _("keepalive support is required to connect"));
- goto cleanup;
- }
-
name = args->name ? *args->name : NULL;
/* If this connection arrived on a readonly socket, force
diff --git a/daemon/test_libvirtd.aug.in b/daemon/test_libvirtd.aug.in
index 4921cbfb86b3..3c29aac9747f 100644
--- a/daemon/test_libvirtd.aug.in
+++ b/daemon/test_libvirtd.aug.in
@@ -57,7 +57,5 @@ module Test_libvirtd =
{ "host_uuid" = "00000000-0000-0000-0000-000000000000" }
{ "keepalive_interval" = "5" }
{ "keepalive_count" = "5" }
- { "keepalive_required" = "1" }
{ "admin_keepalive_interval" = "5" }
{ "admin_keepalive_count" = "5" }
- { "admin_keepalive_required" = "1" }
diff --git a/src/libvirt_remote.syms b/src/libvirt_remote.syms
index 6bfdcfa819bf..90a453c8be9c 100644
--- a/src/libvirt_remote.syms
+++ b/src/libvirt_remote.syms
@@ -101,7 +101,6 @@ virNetServerAddProgram;
virNetServerAddService;
virNetServerClose;
virNetServerHasClients;
-virNetServerKeepAliveRequired;
virNetServerNew;
virNetServerNewPostExecRestart;
virNetServerPreExecRestart;
diff --git a/src/locking/lock_daemon.c b/src/locking/lock_daemon.c
index ecbe03a4c154..c03502459acd 100644
--- a/src/locking/lock_daemon.c
+++ b/src/locking/lock_daemon.c
@@ -151,7 +151,7 @@ virLockDaemonNew(virLockDaemonConfigPtr config, bool privileged)
if (!(lockd->srv = virNetServerNew(1, 1, 0, config->max_clients,
config->max_clients, -1, 0,
- false, NULL,
+ NULL,
virLockDaemonClientNew,
virLockDaemonClientPreExecRestart,
virLockDaemonClientFree,
diff --git a/src/lxc/lxc_controller.c b/src/lxc/lxc_controller.c
index 110a55662be0..48a3597ed274 100644
--- a/src/lxc/lxc_controller.c
+++ b/src/lxc/lxc_controller.c
@@ -925,7 +925,7 @@ static int virLXCControllerSetupServer(virLXCControllerPtr ctrl)
return -1;
if (!(srv = virNetServerNew(0, 0, 0, 1,
- 0, -1, 0, false,
+ 0, -1, 0,
NULL,
virLXCControllerClientPrivateNew,
NULL,
diff --git a/src/rpc/virnetserver.c b/src/rpc/virnetserver.c
index 60a9714f6096..80b5588bf3c9 100644
--- a/src/rpc/virnetserver.c
+++ b/src/rpc/virnetserver.c
@@ -69,7 +69,6 @@ struct _virNetServer {
int keepaliveInterval;
unsigned int keepaliveCount;
- bool keepaliveRequired;
#ifdef WITH_GNUTLS
virNetTLSContextPtr tls;
@@ -312,7 +311,6 @@ virNetServerPtr virNetServerNew(size_t min_workers,
size_t max_anonymous_clients,
int keepaliveInterval,
unsigned int keepaliveCount,
- bool keepaliveRequired,
const char *mdnsGroupName,
virNetServerClientPrivNew clientPrivNew,
virNetServerClientPrivPreExecRestart clientPrivPreExecRestart,
@@ -338,7 +336,6 @@ virNetServerPtr virNetServerNew(size_t min_workers,
srv->nclients_unauth_max = max_anonymous_clients;
srv->keepaliveInterval = keepaliveInterval;
srv->keepaliveCount = keepaliveCount;
- srv->keepaliveRequired = keepaliveRequired;
srv->clientPrivNew = clientPrivNew;
srv->clientPrivPreExecRestart = clientPrivPreExecRestart;
srv->clientPrivFree = clientPrivFree;
@@ -380,7 +377,6 @@ virNetServerPtr virNetServerNewPostExecRestart(virJSONValuePtr object,
unsigned int max_anonymous_clients;
unsigned int keepaliveInterval;
unsigned int keepaliveCount;
- bool keepaliveRequired;
const char *mdnsGroupName = NULL;
if (virJSONValueObjectGetNumberUint(object, "min_workers", &min_workers) < 0) {
@@ -423,11 +419,6 @@ virNetServerPtr virNetServerNewPostExecRestart(virJSONValuePtr object,
_("Missing keepaliveCount data in JSON document"));
goto error;
}
- if (virJSONValueObjectGetBoolean(object, "keepaliveRequired", &keepaliveRequired) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("Missing keepaliveRequired data in JSON document"));
- goto error;
- }
if (virJSONValueObjectHasKey(object, "mdnsGroupName") &&
(!(mdnsGroupName = virJSONValueObjectGetString(object, "mdnsGroupName")))) {
@@ -440,7 +431,7 @@ virNetServerPtr virNetServerNewPostExecRestart(virJSONValuePtr object,
priority_workers, max_clients,
max_anonymous_clients,
keepaliveInterval, keepaliveCount,
- keepaliveRequired, mdnsGroupName,
+ mdnsGroupName,
clientPrivNew, clientPrivPreExecRestart,
clientPrivFree, clientPrivOpaque)))
goto error;
@@ -573,11 +564,6 @@ virJSONValuePtr virNetServerPreExecRestart(virNetServerPtr srv)
_("Cannot set keepaliveCount data in JSON document"));
goto error;
}
- if (virJSONValueObjectAppendBoolean(object, "keepaliveRequired", srv->keepaliveRequired) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("Cannot set keepaliveRequired data in JSON document"));
- goto error;
- }
if (srv->mdnsGroupName &&
virJSONValueObjectAppendString(object, "mdnsGroupName", srv->mdnsGroupName) < 0) {
@@ -786,15 +772,6 @@ void virNetServerClose(virNetServerPtr srv)
virObjectUnlock(srv);
}
-bool virNetServerKeepAliveRequired(virNetServerPtr srv)
-{
- bool required;
- virObjectLock(srv);
- required = srv->keepaliveRequired;
- virObjectUnlock(srv);
- return required;
-}
-
static inline size_t
virNetServerTrackPendingAuthLocked(virNetServerPtr srv)
{
diff --git a/src/rpc/virnetserver.h b/src/rpc/virnetserver.h
index 0e16e8fb1bf0..89d8db9b9ee4 100644
--- a/src/rpc/virnetserver.h
+++ b/src/rpc/virnetserver.h
@@ -41,7 +41,6 @@ virNetServerPtr virNetServerNew(size_t min_workers,
size_t max_anonymous_clients,
int keepaliveInterval,
unsigned int keepaliveCount,
- bool keepaliveRequired,
const char *mdnsGroupName,
virNetServerClientPrivNew clientPrivNew,
virNetServerClientPrivPreExecRestart clientPrivPreExecRestart,
@@ -74,8 +73,6 @@ int virNetServerSetTLSContext(virNetServerPtr srv,
virNetTLSContextPtr tls);
# endif
-bool virNetServerKeepAliveRequired(virNetServerPtr srv);
-
size_t virNetServerTrackPendingAuth(virNetServerPtr srv);
size_t virNetServerTrackCompletedAuth(virNetServerPtr srv);
diff --git a/tests/virnetdaemondata/input-data-admin-nomdns.json b/tests/virnetdaemondata/input-data-no-keepalive-required.json
similarity index 96%
copy from tests/virnetdaemondata/input-data-admin-nomdns.json
copy to tests/virnetdaemondata/input-data-no-keepalive-required.json
index 59bc4714a60b..b5e4dc8e0391 100644
--- a/tests/virnetdaemondata/input-data-admin-nomdns.json
+++ b/tests/virnetdaemondata/input-data-no-keepalive-required.json
@@ -7,7 +7,6 @@
"max_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
@@ -68,7 +67,6 @@
"max_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
diff --git a/tests/virnetdaemondata/output-data-admin-nomdns.json b/tests/virnetdaemondata/output-data-admin-nomdns.json
index 5df71a0d88c8..a814aeb80614 100644
--- a/tests/virnetdaemondata/output-data-admin-nomdns.json
+++ b/tests/virnetdaemondata/output-data-admin-nomdns.json
@@ -8,7 +8,6 @@
"max_anonymous_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
@@ -70,7 +69,6 @@
"max_anonymous_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
diff --git a/tests/virnetdaemondata/output-data-anon-clients.json b/tests/virnetdaemondata/output-data-anon-clients.json
index 4e4332691aa7..05fc0ae00d3f 100644
--- a/tests/virnetdaemondata/output-data-anon-clients.json
+++ b/tests/virnetdaemondata/output-data-anon-clients.json
@@ -8,7 +8,6 @@
"max_anonymous_clients": 10,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
diff --git a/tests/virnetdaemondata/output-data-initial-nomdns.json b/tests/virnetdaemondata/output-data-initial-nomdns.json
index bef54bf94ad5..400e47bc9463 100644
--- a/tests/virnetdaemondata/output-data-initial-nomdns.json
+++ b/tests/virnetdaemondata/output-data-initial-nomdns.json
@@ -8,7 +8,6 @@
"max_anonymous_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
diff --git a/tests/virnetdaemondata/output-data-initial.json b/tests/virnetdaemondata/output-data-initial.json
index 9afa791d91fc..e875cffe5c01 100644
--- a/tests/virnetdaemondata/output-data-initial.json
+++ b/tests/virnetdaemondata/output-data-initial.json
@@ -8,7 +8,6 @@
"max_anonymous_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"mdnsGroupName": "libvirtTest",
"services": [
{
diff --git a/tests/virnetdaemondata/input-data-admin-nomdns.json b/tests/virnetdaemondata/output-data-no-keepalive-required.json
similarity index 96%
copy from tests/virnetdaemondata/input-data-admin-nomdns.json
copy to tests/virnetdaemondata/output-data-no-keepalive-required.json
index 59bc4714a60b..b5e4dc8e0391 100644
--- a/tests/virnetdaemondata/input-data-admin-nomdns.json
+++ b/tests/virnetdaemondata/output-data-no-keepalive-required.json
@@ -7,7 +7,6 @@
"max_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
@@ -68,7 +67,6 @@
"max_clients": 100,
"keepaliveInterval": 120,
"keepaliveCount": 5,
- "keepaliveRequired": true,
"services": [
{
"auth": 0,
diff --git a/tests/virnetdaemontest.c b/tests/virnetdaemontest.c
index ef45018f5873..fb8a6c0c0ec5 100644
--- a/tests/virnetdaemontest.c
+++ b/tests/virnetdaemontest.c
@@ -50,7 +50,7 @@ testCreateServer(const char *host, int family)
}
if (!(srv = virNetServerNew(10, 50, 5, 100, 10,
- 120, 5, true,
+ 120, 5,
mdns_group,
NULL,
NULL,
--
2.5.0
[libvirt] [python PATCH] Check return value of PyList_Append
by Jiri Denemark
libvirt_virDomainGetSecurityLabelList called PyList_Append without
checking its return value. While looking at it I noticed the function
did not properly check several other return values either, so I fixed
them all.
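The error-checking pattern the patch applies throughout, reduced to a
minimal form (hypothetical helper, not the actual binding code):

#include <Python.h>

static PyObject *
build_list(size_t n)
{
    PyObject *list;
    size_t i;

    if (!(list = PyList_New(0)))
        return NULL;

    for (i = 0; i < n; i++) {
        PyObject *entry = PyList_New(2);

        /* PyList_Append does not steal the reference, so drop ours
         * whether or not the append succeeded; real code would then
         * fill both slots of entry via PyList_SetItem */
        if (!entry || PyList_Append(list, entry) < 0) {
            Py_XDECREF(entry);
            Py_XDECREF(list);
            return NULL;
        }
        Py_DECREF(entry);
    }
    return list;
}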
https://bugzilla.redhat.com/show_bug.cgi?id=1249511
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
libvirt-override.c | 46 ++++++++++++++++++++++++++++++++++++++--------
1 file changed, 38 insertions(+), 8 deletions(-)
diff --git a/libvirt-override.c b/libvirt-override.c
index 45c8afc..95061e8 100644
--- a/libvirt-override.c
+++ b/libvirt-override.c
@@ -3142,32 +3142,62 @@ libvirt_virDomainGetSecurityLabel(PyObject *self ATTRIBUTE_UNUSED, PyObject *arg
#if LIBVIR_CHECK_VERSION(0, 10, 0)
static PyObject *
-libvirt_virDomainGetSecurityLabelList(PyObject *self ATTRIBUTE_UNUSED, PyObject *args) {
+libvirt_virDomainGetSecurityLabelList(PyObject *self ATTRIBUTE_UNUSED,
+ PyObject *args)
+{
PyObject *py_retval;
int c_retval;
virDomainPtr dom;
PyObject *pyobj_dom;
- virSecurityLabel *labels;
+ virSecurityLabel *labels = NULL;
size_t i;
if (!PyArg_ParseTuple(args, (char *)"O:virDomainGetSecurityLabel", &pyobj_dom))
return NULL;
+
dom = (virDomainPtr) PyvirDomain_Get(pyobj_dom);
LIBVIRT_BEGIN_ALLOW_THREADS;
c_retval = virDomainGetSecurityLabelList(dom, &labels);
LIBVIRT_END_ALLOW_THREADS;
+
if (c_retval < 0)
return VIR_PY_NONE;
- py_retval = PyList_New(0);
+
+ if (!(py_retval = PyList_New(0)))
+ goto error;
+
for (i = 0 ; i < c_retval ; i++) {
- PyObject *entry = PyList_New(2);
- PyList_SetItem(entry, 0, libvirt_constcharPtrWrap(&labels[i].label[0]));
- PyList_SetItem(entry, 1, libvirt_boolWrap(labels[i].enforcing));
- PyList_Append(py_retval, entry);
+ PyObject *entry;
+ PyObject *value;
+
+ if (!(entry = PyList_New(2)) ||
+ PyList_Append(py_retval, entry) < 0) {
+ Py_XDECREF(entry);
+ goto error;
+ }
+
+ if (!(value = libvirt_constcharPtrWrap(&labels[i].label[0])) ||
+ PyList_SetItem(entry, 0, value) < 0) {
+ Py_XDECREF(value);
+ goto error;
+ }
+
+ if (!(value = libvirt_boolWrap(labels[i].enforcing)) ||
+ PyList_SetItem(entry, 1, value) < 0) {
+ Py_XDECREF(value);
+ goto error;
+ }
}
- free(labels);
+
+ cleanup:
+ VIR_FREE(labels);
return py_retval;
+
+ error:
+ Py_XDECREF(py_retval);
+ py_retval = NULL;
+ goto cleanup;
}
#endif /* LIBVIR_CHECK_VERSION(0, 10, 0) */
--
2.5.0
[libvirt] [PATCH] lxc: Add option to inherit namespace from a named container, a pid, or a netns
by ik.nitk
lxc / docker containers give the option to inherit namespaces. For
example, lxc-start has the option [ --share-[net|ipc|uts] name|pid ],
where --share-net name|pid means inherit a network namespace from a
named container or a pid.
This patch tries to add a similar option to libvirt lxc. So to inherit
a namespace from the named container c2, add this to the XML:
<lxc:namespace>
  <sharenet type='name' value='c2'/>
</lxc:namespace>
And to inherit a namespace from a pid, add this to the XML:
<lxc:namespace>
  <sharenet type='pid' value='10245'/>
</lxc:namespace>
And to inherit a namespace from a netns, add this to the XML:
<lxc:namespace>
  <sharenet type='netns' value='red'/>
</lxc:namespace>
Similar options exist for ipc/uts: <shareipc />, <shareuts />.
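Under the hood, inheriting a namespace boils down to opening the
/proc/<pid>/ns/<name> file and calling setns() before cloning the
container init. A rough standalone sketch (helper name assumed, not
taken verbatim from the patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* join the network namespace of an already running process */
static int join_net_ns(pid_t pid)
{
    char path[64];
    int fd, ret;

    snprintf(path, sizeof(path), "/proc/%d/ns/net", (int) pid);
    if ((fd = open(path, O_RDONLY | O_CLOEXEC)) < 0)
        return -1;

    ret = setns(fd, CLONE_NEWNET);   /* 0 on success, -1 on failure */
    close(fd);
    return ret;
}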
The reason an lxc xml namespace is added is that this feature is very
specific to lxc; therefore I wanted to keep it separated from the
actual libvirt domain xml.
So the final virsh xml file would look like:
<domain type='lxc' xmlns:lxc='http://libvirt.org/schemas/domain/lxc/1.0'>
  <name>cn-03</name>
  <memory>327680</memory>
  <os>
    <type>exe</type>
    <init>/sbin/init</init>
  </os>
  <lxc:namespace>
    <sharenet type='netns' value='red'/>
  </lxc:namespace>
  <vcpu>1</vcpu>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>destroy</on_crash>
  <devices>
    <emulator>/usr/lib/libvirt/libvirt_lxc</emulator>
    <filesystem type='mount'>
      <source dir='/var/lib/lxc/u1/rootfs'/>
      <target dir='/'/>
    </filesystem>
    <console type='pty'/>
  </devices>
</domain>
-imran
---
src/Makefile.am | 5 +-
src/lxc/lxc_conf.c | 2 +-
src/lxc/lxc_conf.h | 23 +++++
src/lxc/lxc_container.c | 191 ++++++++++++++++++++++++++++++++++--
src/lxc/lxc_domain.c | 254 +++++++++++++++++++++++++++++++++++++++++++++++-
src/lxc/lxc_domain.h | 1 +
6 files changed, 463 insertions(+), 13 deletions(-)
diff --git a/src/Makefile.am b/src/Makefile.am
index 579421d..1a78fde 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1293,7 +1293,8 @@ libvirt_driver_lxc_impl_la_CFLAGS = \
-I$(srcdir)/access \
-I$(srcdir)/conf \
$(AM_CFLAGS)
-libvirt_driver_lxc_impl_la_LIBADD = $(CAPNG_LIBS) $(LIBNL_LIBS) $(FUSE_LIBS)
+libvirt_driver_lxc_impl_la_LIBADD = $(CAPNG_LIBS) $(LIBNL_LIBS) $(LIBXML_LIBS) $(FUSE_LIBS)
+libvirt_driver_lxc_impl_la_LDFLAGS = libvirt-lxc.la
if WITH_BLKID
libvirt_driver_lxc_impl_la_CFLAGS += $(BLKID_CFLAGS)
libvirt_driver_lxc_impl_la_LIBADD += $(BLKID_LIBS)
@@ -2652,6 +2653,8 @@ libvirt_lxc_LDADD = \
libvirt-net-rpc.la \
libvirt_security_manager.la \
libvirt_conf.la \
+ libvirt.la \
+ libvirt-lxc.la \
libvirt_util.la \
../gnulib/lib/libgnu.la
if WITH_DTRACE_PROBES
diff --git a/src/lxc/lxc_conf.c b/src/lxc/lxc_conf.c
index c393cb5..96a0f47 100644
--- a/src/lxc/lxc_conf.c
+++ b/src/lxc/lxc_conf.c
@@ -213,7 +213,7 @@ lxcDomainXMLConfInit(void)
{
return virDomainXMLOptionNew(&virLXCDriverDomainDefParserConfig,
&virLXCDriverPrivateDataCallbacks,
- NULL);
+ &virLXCDriverDomainXMLNamespace);
}
diff --git a/src/lxc/lxc_conf.h b/src/lxc/lxc_conf.h
index 8340b1f..59002e5 100644
--- a/src/lxc/lxc_conf.h
+++ b/src/lxc/lxc_conf.h
@@ -67,6 +67,29 @@ struct _virLXCDriverConfig {
bool securityRequireConfined;
};
+
+typedef enum {
+ VIR_DOMAIN_NAMESPACE_SHARENET = 0,
+ VIR_DOMAIN_NAMESPACE_SHAREIPC,
+ VIR_DOMAIN_NAMESPACE_SHAREUTS,
+ VIR_DOMAIN_NAMESPACE_LAST,
+} virDomainNamespace;
+
+struct ns_info {
+ const char *proc_name;
+ int clone_flag;
+};
+
+extern const struct ns_info ns_info[VIR_DOMAIN_NAMESPACE_LAST];
+
+typedef struct _lxcDomainDef lxcDomainDef;
+typedef lxcDomainDef *lxcDomainDefPtr;
+struct _lxcDomainDef {
+ int ns_inherit_fd[VIR_DOMAIN_NAMESPACE_LAST];
+ char *ns_type[VIR_DOMAIN_NAMESPACE_LAST];
+ char *ns_val[VIR_DOMAIN_NAMESPACE_LAST];
+};
+
struct _virLXCDriver {
virMutex lock;
diff --git a/src/lxc/lxc_container.c b/src/lxc/lxc_container.c
index 9a9ae5c..a9a7ba0 100644
--- a/src/lxc/lxc_container.c
+++ b/src/lxc/lxc_container.c
@@ -25,8 +25,8 @@
*/
#include <config.h>
-
#include <fcntl.h>
+#include <sched.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
@@ -38,7 +38,6 @@
#include <mntent.h>
#include <sys/reboot.h>
#include <linux/reboot.h>
-
/* Yes, we want linux private one, for _syscall2() macro */
#include <linux/unistd.h>
@@ -99,6 +98,50 @@ VIR_LOG_INIT("lxc.lxc_container");
typedef char lxc_message_t;
#define LXC_CONTINUE_MSG 'c'
+#ifdef __linux__
+/*
+ * Workaround older glibc. While kernel may support the setns
+ * syscall, the glibc wrapper might not exist. If that's the
+ * case, use our own.
+ */
+# ifndef __NR_setns
+# if defined(__x86_64__)
+# define __NR_setns 308
+# elif defined(__i386__)
+# define __NR_setns 346
+# elif defined(__arm__)
+# define __NR_setns 375
+# elif defined(__aarch64__)
+# define __NR_setns 375
+# elif defined(__powerpc__)
+# define __NR_setns 350
+# elif defined(__s390__)
+# define __NR_setns 339
+# endif
+# endif
+
+# ifndef HAVE_SETNS
+# if defined(__NR_setns)
+# include <sys/syscall.h>
+
+static inline int setns(int fd, int nstype)
+{
+ return syscall(__NR_setns, fd, nstype);
+}
+# else /* !__NR_setns */
+# error Please determine the syscall number for setns on your architecture
+# endif
+# endif
+#else /* !__linux__ */
+static inline int setns(int fd ATTRIBUTE_UNUSED, int nstype ATTRIBUTE_UNUSED)
+{
+ virReportSystemError(ENOSYS, "%s",
+ _("Namespaces are not supported on this platform."));
+ return -1;
+}
+#endif
+
+
typedef struct __lxc_child_argv lxc_child_argv_t;
struct __lxc_child_argv {
virDomainDefPtr config;
@@ -2233,7 +2276,6 @@ static int lxcContainerChild(void *data)
vmDef->os.init);
goto cleanup;
}
-
/* rename and enable interfaces */
if (lxcContainerRenameAndEnableInterfaces(vmDef,
argv->nveths,
@@ -2321,6 +2363,99 @@ virArch lxcContainerGetAlt32bitArch(virArch arch)
return VIR_ARCH_NONE;
}
+/* Used only for containers,same as the one defined in
+ * domain_conf.c. But used locally
+ */
+static const struct ns_info ns_info_local[VIR_DOMAIN_NAMESPACE_LAST] = {
+ [VIR_DOMAIN_NAMESPACE_SHARENET] = {"net", CLONE_NEWNET},
+ [VIR_DOMAIN_NAMESPACE_SHAREIPC] = {"ipc", CLONE_NEWIPC},
+ [VIR_DOMAIN_NAMESPACE_SHAREUTS] = {"uts", CLONE_NEWUTS}
+};
+
+
+static void close_ns(int ns_fd[VIR_DOMAIN_NAMESPACE_LAST])
+{
+ int i;
+ for (i = 0; i < VIR_DOMAIN_NAMESPACE_LAST; i++) {
+ if (ns_fd[i] > -1) {
+ if (VIR_CLOSE(ns_fd[i]) < 0)
+ virReportSystemError(errno, "%s", _("failed to close file"));
+ ns_fd[i] = -1;
+ }
+ }
+}
+
+
+/**
+ * lxcPreserve_ns:
+ * @ns_fd: array to store current namespace
+ * @clone_flags: namespaces that need to be preserved
+ */
+static int lxcPreserve_ns(int ns_fd[VIR_DOMAIN_NAMESPACE_LAST], int clone_flags)
+{
+ int i, saved_errno;
+ char *path = NULL;
+
+ for (i = 0; i < VIR_DOMAIN_NAMESPACE_LAST; i++)
+ ns_fd[i] = -1;
+
+ if (access("/proc/self/ns", X_OK)) {
+ virReportSystemError(errno, "%s",
+ _("Kernel does not support attach; preserve_ns ignored"));
+ return 0;
+ }
+
+ for (i = 0; i < VIR_DOMAIN_NAMESPACE_LAST; i++) {
+ if ((clone_flags & ns_info_local[i].clone_flag) == 0)
+ continue;
+ if (virAsprintf(&path, "/proc/self/ns/%s",
+ ns_info_local[i].proc_name) < 0)
+ goto error;
+ ns_fd[i] = open(path, O_RDONLY | O_CLOEXEC);
+ if (ns_fd[i] < 0)
+ goto error;
+ }
+ VIR_FREE(path);
+ return 0;
+ error:
+ saved_errno = errno;
+ close_ns(ns_fd);
+ errno = saved_errno;
+ VIR_FREE(path);
+ virReportSystemError(errno, _("failed to open '%s'"), path);
+ return -1;
+}
+
+/**
+ * lxcAttach_ns:
+ * @ns_fd: array of namespaces to attach
+ */
+static int lxcAttach_ns(const int ns_fd[VIR_DOMAIN_NAMESPACE_LAST])
+{
+ int i;
+
+ for (i = 0; i < VIR_DOMAIN_NAMESPACE_LAST; i++) {
+ if (ns_fd[i] < 0)
+ continue;
+ VIR_DEBUG("Setting into namespace\n");
+
+ /* We get EINVAL if new NS is same as the current
+ * NS, or if the fd namespace doesn't match the
+ * type passed to setns()'s second param. Since we
+ * pass 0, we know the EINVAL is harmless
+ */
+ if (setns(ns_fd[i], 0) < 0 &&
+ errno != EINVAL)
+ goto error;
+ }
+ return 0;
+
+ error:
+ virReportSystemError(errno, _("failed to set namespace '%s'")
+ , ns_info_local[i].proc_name);
+ return -1;
+}
+
/**
* lxcContainerStart:
@@ -2346,9 +2481,12 @@ int lxcContainerStart(virDomainDefPtr def,
char **ttyPaths)
{
pid_t pid;
- int cflags;
+ int cflags, i;
int stacksize = getpagesize() * 4;
char *stack, *stacktop;
+ int saved_ns_fd[VIR_DOMAIN_NAMESPACE_LAST];
+ int preserve_mask = 0;
+ lxcDomainDefPtr lxcDef;
lxc_child_argv_t args = {
.config = def,
.securityDriver = securityDriver,
@@ -2368,7 +2506,14 @@ int lxcContainerStart(virDomainDefPtr def,
stacktop = stack + stacksize;
- cflags = CLONE_NEWPID|CLONE_NEWNS|CLONE_NEWUTS|CLONE_NEWIPC|SIGCHLD;
+ lxcDef = def->namespaceData;
+ for (i = 0; i < VIR_DOMAIN_NAMESPACE_LAST; i++)
+ if (lxcDef && lxcDef->ns_inherit_fd[i] != -1)
+ preserve_mask |= ns_info_local[i].clone_flag;
+
+
+
+ cflags = CLONE_NEWPID|CLONE_NEWNS|SIGCHLD;
if (userns_required(def)) {
if (userns_supported()) {
@@ -2381,10 +2526,36 @@ int lxcContainerStart(virDomainDefPtr def,
return -1;
}
}
+ if (!lxcDef || (lxcDef && lxcDef->ns_inherit_fd[VIR_DOMAIN_NAMESPACE_SHARENET] == -1)) {
+ if (lxcNeedNetworkNamespace(def)) {
+ VIR_DEBUG("Enable network namespaces");
+ cflags |= CLONE_NEWNET;
+ }
+ } else {
+ VIR_DEBUG("Inheriting a net namespace");
+ }
- if (lxcNeedNetworkNamespace(def)) {
- VIR_DEBUG("Enable network namespaces");
- cflags |= CLONE_NEWNET;
+ if (!lxcDef || (lxcDef && lxcDef->ns_inherit_fd[VIR_DOMAIN_NAMESPACE_SHAREIPC] == -1)) {
+ cflags |= CLONE_NEWIPC;
+ } else {
+ VIR_DEBUG("Inheriting an IPC namespace");
+ }
+
+ if (!lxcDef || (lxcDef && lxcDef->ns_inherit_fd[VIR_DOMAIN_NAMESPACE_SHAREUTS] == -1)) {
+ cflags |= CLONE_NEWUTS;
+ } else {
+ VIR_DEBUG("Inheriting a UTS namespace");
+ }
+
+ if (lxcDef && lxcPreserve_ns(saved_ns_fd, preserve_mask) < 0) {
+ virReportError(VIR_ERR_SYSTEM_ERROR, "%s",
+ _("failed to preserve the namespace"));
+ return -1;
+ }
+ if (lxcDef && lxcAttach_ns(lxcDef->ns_inherit_fd) < 0) {
+ virReportError(VIR_ERR_SYSTEM_ERROR, "%s",
+ _("failed to attach the namespace"));
+ return -1;
}
VIR_DEBUG("Cloning container init process");
@@ -2397,6 +2568,10 @@ int lxcContainerStart(virDomainDefPtr def,
_("Failed to run clone container"));
return -1;
}
+ if (lxcDef && lxcAttach_ns(saved_ns_fd)) {
+ virReportError(VIR_ERR_SYSTEM_ERROR, "%s",
+ _("failed to restore saved namespaces"));
+ }
return pid;
}
diff --git a/src/lxc/lxc_domain.c b/src/lxc/lxc_domain.c
index c2180cb..6e4a19a 100644
--- a/src/lxc/lxc_domain.c
+++ b/src/lxc/lxc_domain.c
@@ -20,14 +20,18 @@
*/
#include <config.h>
-
#include "lxc_domain.h"
-
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
+#include <fcntl.h>
+#include <libxml/xpathInternals.h>
+#include "virstring.h"
+#include "virutil.h"
+#include "virfile.h"
#define VIR_FROM_THIS VIR_FROM_LXC
+#define LXC_NAMESPACE_HREF "http://libvirt.org/schemas/domain/lxc/1.0"
VIR_LOG_INIT("lxc.lxc_domain");
@@ -41,6 +45,251 @@ static void *virLXCDomainObjPrivateAlloc(void)
return priv;
}
+
+static int open_ns(const char *nnsname_pid, const char *ns_proc_name)
+{
+ int fd = -1;
+ virDomainPtr dom = NULL;
+ virConnectPtr conn = NULL;
+ pid_t pid;
+ int nfdlist;
+ int *fdlist;
+ char *path = NULL;
+ char *eptr;
+ pid = strtol(nnsname_pid, &eptr, 10);
+ if (*eptr != '\0' || pid < 1) {
+ /* check if the domain is running, then set the namespaces
+ * to that container
+ */
+ size_t i = 0;
+ const char *ns[] = { "user", "ipc", "uts", "net", "pid", "mnt" };
+ conn = virConnectOpen("lxc:///");
+ if (!conn)
+ goto cleanup;
+ dom = virDomainLookupByName(conn, nnsname_pid);
+ if (!dom)
+ goto cleanup;
+ if ((nfdlist = virDomainLxcOpenNamespace(dom, &fdlist, 0)) < 0)
+ goto cleanup;
+ /* Internally above function calls virProcessGetNamespaces
+ * function which opens ns
+ * in the order { "user", "ipc", "uts", "net", "pid", "mnt" }
+ */
+ for (i = 0; i < ARRAY_CARDINALITY(ns); i++) {
+ if (STREQ(ns[i], ns_proc_name)) {
+ fd = fdlist[i];
+ break;
+ }
+ }
+ if (nfdlist > 0)
+ VIR_FREE(fdlist);
+ } else {
+ if (virAsprintf(&path, "/proc/%d/ns/%s", pid, ns_proc_name) < 0)
+ goto cleanup;
+ fd = open(path, O_RDONLY);
+ }
+cleanup:
+ if (dom)
+ virDomainFree(dom);
+ VIR_FREE(path);
+ (fd < 0)? VIR_ERROR(
+ _("failed to open ns %s"), nnsname_pid):
+ VIR_DEBUG("OPENED NAMESPACE : fd %d\n", fd);
+ return fd;
+}
+
+
+/* Used only for containers */
+const struct ns_info ns_info[VIR_DOMAIN_NAMESPACE_LAST] = {
+ [VIR_DOMAIN_NAMESPACE_SHARENET] = {"net", CLONE_NEWNET},
+ [VIR_DOMAIN_NAMESPACE_SHAREIPC] = {"ipc", CLONE_NEWIPC},
+ [VIR_DOMAIN_NAMESPACE_SHAREUTS] = {"uts", CLONE_NEWUTS}
+};
+
+VIR_ENUM_DECL(virDomainNamespace)
+VIR_ENUM_IMPL(virDomainNamespace, VIR_DOMAIN_NAMESPACE_LAST,
+ N_("sharenet"),
+ N_("shareipc"),
+ N_("shareuts"))
+
+static void
+lxcDomainDefNamespaceFree(void *nsdata)
+{
+ int j;
+ lxcDomainDefPtr lxcDef = nsdata;
+ for (j = 0; j < VIR_DOMAIN_NAMESPACE_LAST; j++) {
+ if (lxcDef->ns_inherit_fd[j] > 0) {
+ VIR_FREE(lxcDef->ns_type);
+ VIR_FREE(lxcDef->ns_val);
+#if 0
+ if (VIR_CLOSE(lxcDef->ns_inherit_fd[j]) < 0)
+ virReportSystemError(errno, "%s", _("failed to close file"));
+#endif
+ }
+ }
+ VIR_FREE(nsdata);
+}
+
+static int
+lxcDomainDefNamespaceParse(xmlDocPtr xml ATTRIBUTE_UNUSED,
+ xmlNodePtr root ATTRIBUTE_UNUSED,
+ xmlXPathContextPtr ctxt,
+ void **data)
+{
+ lxcDomainDefPtr lxcDef = NULL;
+ xmlNodePtr *nodes = NULL;
+ bool uses_lxc_ns = false;
+ xmlNodePtr node;
+ int feature;
+ int n;
+ char *tmp = NULL;
+ size_t i;
+ pid_t fd = -1;
+
+ if (xmlXPathRegisterNs(ctxt, BAD_CAST "lxc", BAD_CAST LXC_NAMESPACE_HREF) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Failed to register xml namespace '%s'"),
+ LXC_NAMESPACE_HREF);
+ return -1;
+ }
+
+ if (VIR_ALLOC(lxcDef) < 0)
+ return -1;
+
+ /* Init ns_herit_fd for namespaces */
+ for (i = 0; i < VIR_DOMAIN_NAMESPACE_LAST; i++) {
+ lxcDef->ns_inherit_fd[i] = -1;
+ lxcDef->ns_type[i] = NULL;
+ lxcDef->ns_val[i] = NULL;
+ }
+
+ node = ctxt->node;
+ if ((n = virXPathNodeSet("./lxc:namespace/*", ctxt, &nodes)) < 0)
+ goto error;
+ uses_lxc_ns |= n > 0;
+
+ for (i = 0; i < n; i++) {
+ feature =
+ virDomainNamespaceTypeFromString((const char *) nodes[i]->name);
+ if (feature < 0) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("unsupported Namespace feature: %s"),
+ nodes[i]->name);
+ goto error;
+ }
+
+ ctxt->node = nodes[i];
+
+ switch ((virDomainNamespace) feature) {
+ case VIR_DOMAIN_NAMESPACE_SHARENET:
+ case VIR_DOMAIN_NAMESPACE_SHAREIPC:
+ case VIR_DOMAIN_NAMESPACE_SHAREUTS:
+ {
+ tmp = virXMLPropString(nodes[i], "type");
+ if (tmp == NULL) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("No lxc environment type specified"));
+ goto error;
+ }
+ /* save the tmp so that its needed while writing to xml */
+ lxcDef->ns_type[feature] = tmp;
+ tmp = virXMLPropString(nodes[i], "value");
+ if (tmp == NULL) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ "%s", _("No lxc environment type specified"));
+ goto error;
+ }
+ lxcDef->ns_val[feature] = tmp;
+ /*netns option is only for VIR_DOMAIN_NAMESPACE_SHARENET*/
+ if (STREQ("netns", lxcDef->ns_type[VIR_DOMAIN_NAMESPACE_SHARENET])) {
+ char *path = NULL;
+ if (virAsprintf(&path, "/var/run/netns/%s", tmp) < 0)
+ goto error;
+ fd = open(path, O_RDONLY);
+ VIR_FREE(path);
+ } else {
+ fd = open_ns(tmp, ns_info[feature].proc_name);
+ if (fd < 0) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("unable to open %s namespace for "
+ "namespace feature '%s'"), tmp,
+ nodes[i]->name);
+ goto error;
+ }
+ }
+ lxcDef->ns_inherit_fd[feature] = fd;
+ }
+ break;
+ case VIR_DOMAIN_NAMESPACE_LAST:
+ break;
+ }
+ }
+ VIR_FREE(nodes);
+ ctxt->node = node;
+ if (uses_lxc_ns)
+ *data = lxcDef;
+ else
+ VIR_FREE(lxcDef);
+ return 0;
+ error:
+ VIR_FREE(nodes);
+ lxcDomainDefNamespaceFree(lxcDef);
+ return -1;
+}
+
+
+static int
+lxcDomainDefNamespaceFormatXML(virBufferPtr buf,
+ void *nsdata)
+{
+ lxcDomainDefPtr lxcDef = nsdata;
+ size_t j;
+
+ if (!lxcDef)
+ return 0;
+
+ virBufferAddLit(buf, "<lxc:namespace>\n");
+ virBufferAdjustIndent(buf, 2);
+
+ for (j = 0; j < VIR_DOMAIN_NAMESPACE_LAST; j++) {
+ switch ((virDomainNamespace) j) {
+ case VIR_DOMAIN_NAMESPACE_SHAREIPC:
+ case VIR_DOMAIN_NAMESPACE_SHAREUTS:
+ case VIR_DOMAIN_NAMESPACE_SHARENET:
+ {
+ if (lxcDef->ns_inherit_fd[j] > 0) {
+ virBufferAsprintf(buf, "<%s type='%s' value='%s'/>\n",
+ virDomainNamespaceTypeToString(j),
+ lxcDef->ns_type[j],
+ lxcDef->ns_val[j]);
+ }
+ }
+ break;
+ case VIR_DOMAIN_NAMESPACE_LAST:
+ break;
+ }
+ }
+
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "</lxc:namespace>\n");
+ return 0;
+}
+
+static const char *
+lxcDomainDefNamespaceHref(void)
+{
+ return "xmlns:lxc='" LXC_NAMESPACE_HREF "'";
+}
+
+
+virDomainXMLNamespace virLXCDriverDomainXMLNamespace = {
+ .parse = lxcDomainDefNamespaceParse,
+ .free = lxcDomainDefNamespaceFree,
+ .format = lxcDomainDefNamespaceFormatXML,
+ .href = lxcDomainDefNamespaceHref,
+};
+
+
static void virLXCDomainObjPrivateFree(void *data)
{
virLXCDomainObjPrivatePtr priv = data;
@@ -73,7 +322,6 @@ static int virLXCDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, void *data)
} else {
priv->initpid = thepid;
}
-
return 0;
}
diff --git a/src/lxc/lxc_domain.h b/src/lxc/lxc_domain.h
index 751aece..25df999 100644
--- a/src/lxc/lxc_domain.h
+++ b/src/lxc/lxc_domain.h
@@ -41,6 +41,7 @@ struct _virLXCDomainObjPrivate {
virCgroupPtr cgroup;
};
+extern virDomainXMLNamespace virLXCDriverDomainXMLNamespace;
extern virDomainXMLPrivateDataCallbacks virLXCDriverPrivateDataCallbacks;
extern virDomainDefParserConfig virLXCDriverDomainDefParserConfig;
--
1.9.1
[libvirt] [PATCH 0/4] manage the shmem device source
by Luyao Huang
There is a shmobj leak when we let qemu create the shmobj by itself,
and the label of the shmobj/shmem-server socket is not right either.
The guest cannot directly use the shmem server if the user has enabled
selinux. So it is better to manage it in libvirt.
The way I chose is to track the shmem devices in a list, save its
status to a local file to avoid losing it after restarting libvirtd,
count the guests which use each device, and let the callers know when
no guest is using it any more (then we can relabel/clean up some
resources).
is not correct, just set the socket label is not enought, selinux
still will forbid qemu use it, because the shmem-server's process is
not correct, you will find the AVC like this (i set up the ivshmem
server via shell):
type=AVC msg=audit(1437642157.227:73784): avc: denied { connectto } for \
pid=6137 comm="qemu-kvm" path="/tmp/ivshmem_socket" \
scontext=system_u:system_r:svirt_t:s0:c703,c707 \
tcontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tclass=unix_stream_socket
But the problem is that we cannot change the label of the running
shmem-server process. We need to wait for ivshmem-server to become a
part of the qemu program and then set up the ivshmem-server from
libvirt; we cannot do anything about the ivshmem-server right now.
Luyao Huang (4):
conf: introduce seclabels in shmem device element
security: add security part for shmem device
util: introduce new helpers to manage shmem device
qemu: call the helpers in virshm.c to manage shmem device
configure.ac | 10 +
docs/formatdomain.html.in | 7 +
docs/schemas/domaincommon.rng | 3 +
po/POTFILES.in | 3 +-
src/Makefile.am | 5 +-
src/conf/domain_conf.c | 55 +++-
src/conf/domain_conf.h | 5 +
src/libvirt_private.syms | 18 ++
src/qemu/qemu_conf.h | 3 +
src/qemu/qemu_driver.c | 4 +
src/qemu/qemu_process.c | 158 ++++++++++
src/security/security_dac.c | 67 +++++
src/security/security_driver.h | 11 +
src/security/security_manager.c | 38 +++
src/security/security_manager.h | 8 +
src/security/security_selinux.c | 70 +++++
src/security/security_stack.c | 41 +++
src/util/virshm.c | 623 ++++++++++++++++++++++++++++++++++++++++
src/util/virshm.h | 104 +++++++
19 files changed, 1220 insertions(+), 13 deletions(-)
create mode 100644 src/util/virshm.c
create mode 100644 src/util/virshm.h
--
1.8.3.1
Re: [libvirt] Libvirt Application Development Guide
by Kevin Walker
Hi All
I am currently getting up to speed with the libvirt api, but have noticed
there are some big gaps in the Application Development Guide available on
the libvirt website.
Is there a more recent version of this document available?
Kind regards
Kevin Walker
+968 97651742
[libvirt] [PATCH] rpc: RH1026137: Fix slow volume download (virsh vol-download)
by Ossi Herrala
Use an I/O vector (iovec) instead of one huge memory buffer, as
suggested in https://bugzilla.redhat.com/show_bug.cgi?id=1026137#c7.
This avoids doing memmove() on big buffers, and performance doesn't
degrade if the source (virNetClientStreamQueuePacket()) is faster than
the sink (virNetClientStreamRecvPacket()).
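The idea, in a minimal standalone sketch (assumed names, not the patch
itself): every incoming packet becomes its own iovec entry, so the
reader consumes entries in order and never has to memmove() the tail
of one large buffer:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* producer side: append a copy of the packet payload as a new vector */
static int queue_packet(struct iovec **vec, size_t *nvec,
                        const char *buf, size_t len)
{
    struct iovec *tmp = realloc(*vec, (*nvec + 1) * sizeof(**vec));
    if (!tmp)
        return -1;
    *vec = tmp;

    if (!(tmp[*nvec].iov_base = malloc(len)))
        return -1;
    memcpy(tmp[*nvec].iov_base, buf, len);
    tmp[*nvec].iov_len = len;
    (*nvec)++;
    return 0;
}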
---
src/rpc/virnetclientstream.c | 134 +++++++++++++++++++++++++----------------
1 files changed, 82 insertions(+), 52 deletions(-)
diff --git a/src/rpc/virnetclientstream.c b/src/rpc/virnetclientstream.c
index b428f4b..18c6e8b 100644
--- a/src/rpc/virnetclientstream.c
+++ b/src/rpc/virnetclientstream.c
@@ -49,9 +49,9 @@ struct _virNetClientStream {
* time by stopping consuming any incoming data
* off the socket....
*/
- char *incoming;
- size_t incomingOffset;
- size_t incomingLength;
+ struct iovec *incomingVec; /* I/O Vector to hold data */
+ size_t writeVec; /* Vectors produced */
+ size_t readVec; /* Vectors consumed */
bool incomingEOF;
virNetClientStreamEventCallback cb;
@@ -86,9 +86,9 @@ virNetClientStreamEventTimerUpdate(virNetClientStreamPtr st)
if (!st->cb)
return;
- VIR_DEBUG("Check timer offset=%zu %d", st->incomingOffset, st->cbEvents);
+ VIR_DEBUG("Check timer readVec %zu writeVec %zu %d", st->readVec, st->writeVec, st->cbEvents);
- if (((st->incomingOffset || st->incomingEOF) &&
+ if ((((st->readVec < st->writeVec) || st->incomingEOF) &&
(st->cbEvents & VIR_STREAM_EVENT_READABLE)) ||
(st->cbEvents & VIR_STREAM_EVENT_WRITABLE)) {
VIR_DEBUG("Enabling event timer");
@@ -110,13 +110,14 @@ virNetClientStreamEventTimer(int timer ATTRIBUTE_UNUSED, void *opaque)
if (st->cb &&
(st->cbEvents & VIR_STREAM_EVENT_READABLE) &&
- (st->incomingOffset || st->incomingEOF))
+ ((st->readVec < st->writeVec) || st->incomingEOF))
events |= VIR_STREAM_EVENT_READABLE;
if (st->cb &&
(st->cbEvents & VIR_STREAM_EVENT_WRITABLE))
events |= VIR_STREAM_EVENT_WRITABLE;
- VIR_DEBUG("Got Timer dispatch %d %d offset=%zu", events, st->cbEvents, st->incomingOffset);
+ VIR_DEBUG("Got Timer dispatch %d %d readVec %zu writeVec %zu", events, st->cbEvents,
+ st->readVec, st->writeVec);
if (events) {
virNetClientStreamEventCallback cb = st->cb;
void *cbOpaque = st->cbOpaque;
@@ -161,7 +162,7 @@ void virNetClientStreamDispose(void *obj)
virNetClientStreamPtr st = obj;
virResetError(&st->err);
- VIR_FREE(st->incoming);
+ VIR_FREE(st->incomingVec);
virObjectUnref(st->prog);
}
@@ -265,38 +266,49 @@ int virNetClientStreamQueuePacket(virNetClientStreamPtr st,
virNetMessagePtr msg)
{
int ret = -1;
- size_t need;
+ struct iovec iov;
+ char *base;
+ size_t piece, pieces, length, offset = 0, size = 1024*1024;
virObjectLock(st);
- need = msg->bufferLength - msg->bufferOffset;
- if (need) {
- size_t avail = st->incomingLength - st->incomingOffset;
- if (need > avail) {
- size_t extra = need - avail;
- if (VIR_REALLOC_N(st->incoming,
- st->incomingLength + extra) < 0) {
- VIR_DEBUG("Out of memory handling stream data");
- goto cleanup;
- }
- st->incomingLength += extra;
- }
- memcpy(st->incoming + st->incomingOffset,
- msg->buffer + msg->bufferOffset,
- msg->bufferLength - msg->bufferOffset);
- st->incomingOffset += (msg->bufferLength - msg->bufferOffset);
- } else {
+ length = msg->bufferLength - msg->bufferOffset;
+
+ if (length == 0) {
st->incomingEOF = true;
+ goto end;
}
- VIR_DEBUG("Stream incoming data offset %zu length %zu EOF %d",
- st->incomingOffset, st->incomingLength,
- st->incomingEOF);
+ pieces = (length + size - 1) / size;
+ for (piece = 0; piece < pieces; piece++) {
+ if (size > length - offset)
+ size = length - offset;
+
+ if (VIR_ALLOC_N(base, size)) {
+ VIR_DEBUG("Allocation failed");
+ goto cleanup;
+ }
+
+ memcpy(base, msg->buffer + msg->bufferOffset + offset, size);
+ iov.iov_base = base;
+ iov.iov_len = size;
+ offset += size;
+
+ if (VIR_APPEND_ELEMENT(st->incomingVec, st->writeVec, iov) < 0) {
+ VIR_DEBUG("Append failed");
+ VIR_FREE(base);
+ goto cleanup;
+ }
+ VIR_DEBUG("Wrote piece of vector. readVec %zu, writeVec %zu size %zu", st->readVec, st->writeVec, size);
+ }
+
+ end:
virNetClientStreamEventTimerUpdate(st);
-
ret = 0;
cleanup:
+ VIR_DEBUG("Stream incoming data readVec %zu writeVec %zu EOF %d",
+ st->readVec, st->writeVec, st->incomingEOF);
virObjectUnlock(st);
return ret;
}
@@ -361,17 +373,21 @@ int virNetClientStreamRecvPacket(virNetClientStreamPtr st,
size_t nbytes,
bool nonblock)
{
- int rv = -1;
+ int ret = -1;
+ size_t partial, offset;
+
+ virObjectLock(st);
+
VIR_DEBUG("st=%p client=%p data=%p nbytes=%zu nonblock=%d",
st, client, data, nbytes, nonblock);
- virObjectLock(st);
- if (!st->incomingOffset && !st->incomingEOF) {
+
+ if ((st->readVec >= st->writeVec) && !st->incomingEOF) {
virNetMessagePtr msg;
- int ret;
+ int rv;
if (nonblock) {
VIR_DEBUG("Non-blocking mode and no data available");
- rv = -2;
+ ret = -2;
goto cleanup;
}
@@ -387,37 +403,51 @@ int virNetClientStreamRecvPacket(virNetClientStreamPtr st,
VIR_DEBUG("Dummy packet to wait for stream data");
virObjectUnlock(st);
- ret = virNetClientSendWithReplyStream(client, msg, st);
+ rv = virNetClientSendWithReplyStream(client, msg, st);
virObjectLock(st);
virNetMessageFree(msg);
- if (ret < 0)
+ if (rv < 0)
goto cleanup;
}
- VIR_DEBUG("After IO %zu", st->incomingOffset);
- if (st->incomingOffset) {
- int want = st->incomingOffset;
- if (want > nbytes)
- want = nbytes;
- memcpy(data, st->incoming, want);
- if (want < st->incomingOffset) {
- memmove(st->incoming, st->incoming + want, st->incomingOffset - want);
- st->incomingOffset -= want;
+ offset = 0;
+ partial = nbytes;
+
+ while (st->incomingVec && (st->readVec < st->writeVec)) {
+ struct iovec *iov = st->incomingVec + st->readVec;
+
+ if (!iov || !iov->iov_base) {
+ VIR_DEBUG("NULL pointer");
+ goto cleanup;
+ }
+
+ if (partial < iov->iov_len) {
+ memcpy(data+offset, iov->iov_base, partial);
+ memmove(iov->iov_base, (char*)iov->iov_base+partial, iov->iov_len-partial);
+ iov->iov_len -= partial;
+ offset += partial;
+ VIR_DEBUG("Consumed %zu, left %zu", partial, iov->iov_len);
+ break;
} else {
- VIR_FREE(st->incoming);
- st->incomingOffset = st->incomingLength = 0;
+ memcpy(data+offset, iov->iov_base, iov->iov_len);
+ VIR_DEBUG("Consumed %zu. Moving to next piece", iov->iov_len);
+ partial -= iov->iov_len;
+ offset += iov->iov_len;
+ VIR_FREE(iov->iov_base);
+ iov->iov_len = 0;
+ st->readVec++;
}
- rv = want;
- } else {
- rv = 0;
+
+ VIR_DEBUG("Read piece of vector. read %zu readVec %zu, writeVec %zu", offset, st->readVec, st->writeVec);
}
+ ret = offset;
virNetClientStreamEventTimerUpdate(st);
cleanup:
virObjectUnlock(st);
- return rv;
+ return ret;
}
--
1.7.1
9 years, 4 months
[libvirt] [PATCH] qemu: fix some APIs that do not work when cpuset is disabled in conf
by Luyao Huang
https://bugzilla.redhat.com/show_bug.cgi?id=1244664
If user disable cpuset in qemu.conf, we shouldn't
try to use it, also shouldn't make some command which
can work without cpuset cannot work.
Fix these case:
1. start guest with strict numa policy (we can use libnuma help us).
2. Hot add vcpu.
3. hot add iothread.
Signed-off-by: Luyao Huang <lhuang@redhat.com>
---
src/qemu/qemu_cgroup.c | 16 ++++++++--------
src/qemu/qemu_driver.c | 10 +++++++---
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index 8ed74ee..640a223 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -1028,10 +1028,6 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
if (virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]) < 0)
goto cleanup;
- if (mem_mask &&
- virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
- goto cleanup;
-
if (period || quota) {
if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
goto cleanup;
@@ -1041,6 +1037,10 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
virBitmapPtr cpumap = NULL;
+ if (mem_mask &&
+ virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
+ goto cleanup;
+
/* try to use the default cpu maps */
if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
cpumap = priv->autoCpuset;
@@ -1205,15 +1205,15 @@ qemuSetupCgroupForIOThreads(virDomainObjPtr vm)
goto cleanup;
}
- if (mem_mask &&
- virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0)
- goto cleanup;
-
/* Set iothreadpin in cgroup if iothreadpin xml is provided */
if (virCgroupHasController(priv->cgroup,
VIR_CGROUP_CONTROLLER_CPUSET)) {
virBitmapPtr cpumask = NULL;
+ if (mem_mask &&
+ virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0)
+ goto cleanup;
+
if (def->iothreadids[i]->cpumask)
cpumask = def->iothreadids[i]->cpumask;
else if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f352a88..bb7cef4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4597,7 +4597,9 @@ qemuDomainAddCgroupForThread(virCgroupPtr cgroup,
if (virCgroupNewThread(cgroup, nameval, idx, true, &new_cgroup) < 0)
return NULL;
- if (mem_mask && virCgroupSetCpusetMems(new_cgroup, mem_mask) < 0)
+ if (mem_mask &&
+ virCgroupHasController(cgroup, VIR_CGROUP_CONTROLLER_CPUSET) &&
+ virCgroupSetCpusetMems(new_cgroup, mem_mask) < 0)
goto error;
/* Add pid/thread to the cgroup */
@@ -4653,7 +4655,8 @@ qemuDomainHotplugPinThread(virBitmapPtr cpumask,
{
int ret = -1;
- if (cgroup) {
+ if (cgroup &&
+ virCgroupHasController(cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
if (qemuSetupCgroupCpusetCpus(cgroup, cpumask) < 0) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("failed to set cpuset.cpus in cgroup for id %d"),
@@ -4896,7 +4899,8 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
goto endjob;
- if (def && !(flags & VIR_DOMAIN_VCPU_GUEST) && virNumaIsAvailable()) {
+ if (def && !(flags & VIR_DOMAIN_VCPU_GUEST) && virNumaIsAvailable() &&
+ virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
false, &cgroup_temp) < 0)
goto endjob;
--
1.8.3.1