[libvirt] [PATCHv2 00/11] bulk stats: QEMU implementation
by Francesco Romani
This patchset enhances the QEMU support
for the new bulk stats API to include
equivalents of these APIs:
virDomainBlockInfo
virDomainGetInfo - for balloon stats
virDomainGetCPUStats
virDomainBlockStatsFlags
virDomainInterfaceStats
virDomainGetVcpusFlags
virDomainGetVcpus
This subset of APIs is the one oVirt relies on.
Scale/stress test on an oVirt test environment is in progress.
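For context, a minimal sketch of how a management application could consume
these groups through the bulk API follows; this is an illustration only, and
assumes the virConnectGetAllDomainStats() entry point and the
VIR_DOMAIN_STATS_* flags from libvirt.h rather than anything defined by this
patchset itself:

/* Sketch only: fetch balloon, vcpu, interface and block stats for all
 * running domains in a single call; error handling kept minimal. */
#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    virDomainStatsRecordPtr *records = NULL;
    unsigned int stats = VIR_DOMAIN_STATS_BALLOON | VIR_DOMAIN_STATS_VCPU |
                         VIR_DOMAIN_STATS_INTERFACE | VIR_DOMAIN_STATS_BLOCK;
    int nrecords, i, j;

    if (!conn)
        return 1;

    nrecords = virConnectGetAllDomainStats(conn, stats, &records,
                                           VIR_CONNECT_GET_ALL_DOMAINS_STATS_RUNNING);
    for (i = 0; i < nrecords; i++) {
        printf("domain %s\n", virDomainGetName(records[i]->dom));
        for (j = 0; j < records[i]->nparams; j++)
            printf("  %s\n", records[i]->params[j].field);
    }

    if (records)
        virDomainStatsRecordListFree(records);
    virConnectClose(conn);
    return 0;
}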
changes in v2: polishing and optimizations.
- incorporated feedback from Li Wei (thanks)
- added documentation
- optimized block group to gather all the information with just
one call to QEMU monitor
- stripped to the bare bones and merged the 'block info' group into the
'block' group - oVirt actually needs just one stat from there
- reorganized the keys to be more consistent and shorter.
The patchset is organized as follows:
- the first 4 patches do refactoring to extract internal helper
functions to be used by the old API and by the new bulk one.
For block stats one helper is actually added instead of extracted.
- since some groups require access to the QEMU monitor, one patch
extends the internal interface to easily accommodate that
- finally, the last six patches implement the support for the
bulk API.
Francesco Romani (11):
qemu: extract helper to get the current balloon
qemu: extract helper to gather vcpu data
qemu: add helper to get the block stats
qemu: report highest offset into block stats
qemu: bulk stats: pass connection to workers
qemu: bulk stats: implement CPU stats group
qemu: bulk stats: implement balloon group
qemu: bulk stats: implement VCPU group
qemu: bulk stats: implement interface group
qemu: bulk stats: implement block group
qemu: bulk stats: add block allocation information
include/libvirt/libvirt.h.in | 5 +
src/libvirt.c | 47 ++++
src/qemu/qemu_driver.c | 500 +++++++++++++++++++++++++++++++++++++++----
src/qemu/qemu_monitor.c | 23 ++
src/qemu/qemu_monitor.h | 19 ++
src/qemu/qemu_monitor_json.c | 125 +++++++----
src/qemu/qemu_monitor_json.h | 4 +
7 files changed, 639 insertions(+), 84 deletions(-)
--
1.9.3
[libvirt] [PATCH RFC] LXC: add HOME environment variable
by Chen Hanxiao
We lacked a HOME environment variable;
set 'HOME=/' as the default.
Signed-off-by: Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
---
src/lxc/lxc_container.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/lxc/lxc_container.c b/src/lxc/lxc_container.c
index 1cf2c8f..9df9c04 100644
--- a/src/lxc/lxc_container.c
+++ b/src/lxc/lxc_container.c
@@ -236,6 +236,7 @@ static virCommandPtr lxcContainerBuildInitCmd(virDomainDefPtr vmDef,
virCommandAddEnvString(cmd, "PATH=/bin:/sbin");
virCommandAddEnvString(cmd, "TERM=linux");
virCommandAddEnvString(cmd, "container=lxc-libvirt");
+ virCommandAddEnvString(cmd, "HOME=/");
virCommandAddEnvPair(cmd, "container_uuid", uuidstr);
if (nttyPaths > 1)
virCommandAddEnvPair(cmd, "container_ttys", virBufferCurrentContent(&buf));
--
1.9.0
Re: [libvirt] [PATCH] add migration support for OpenVZ driver
by Hongbin Lu
Resent the email below. Thanks.
On Sun, Aug 31, 2014 at 2:05 PM, Hongbin Lu <hongbin034@gmail.com> wrote:
> This patch adds initial migration support to the OpenVZ driver,
> using the VIR_DRV_FEATURE_MIGRATION_PARAMS family of migration
> functions.
> ---
> src/openvz/openvz_conf.h | 5 +-
> src/openvz/openvz_driver.c | 348 ++++++++++++++++++++++++++++++++++++++
> src/openvz/openvz_driver.h | 10 ++
> 3 files changed, 361 insertions(+), 2 deletions(-)
>
> diff --git a/src/openvz/openvz_conf.h b/src/openvz/openvz_conf.h
> index a7de7d2..33998d6 100644
> --- a/src/openvz/openvz_conf.h
> +++ b/src/openvz/openvz_conf.h
> @@ -35,8 +35,9 @@
>
>
> /* OpenVZ commands - Replace with wrapper scripts later? */
> -# define VZLIST "/usr/sbin/vzlist"
> -# define VZCTL "/usr/sbin/vzctl"
> +# define VZLIST "/usr/sbin/vzlist"
> +# define VZCTL "/usr/sbin/vzctl"
> +# define VZMIGRATE "/usr/sbin/vzmigrate"
> # define VZ_CONF_FILE "/etc/vz/vz.conf"
>
> # define VZCTL_BRIDGE_MIN_VERSION ((3 * 1000 * 1000) + (0 * 1000) + 22 + 1)
> diff --git a/src/openvz/openvz_driver.c b/src/openvz/openvz_driver.c
> index 851ed30..0f46872 100644
> --- a/src/openvz/openvz_driver.c
> +++ b/src/openvz/openvz_driver.c
> @@ -2207,6 +2207,348 @@ openvzNodeGetCPUMap(virConnectPtr conn ATTRIBUTE_UNUSED,
> }
>
>
> +static int
> +openvzConnectSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
> +{
> + switch (feature) {
> + case VIR_DRV_FEATURE_MIGRATION_PARAMS:
> + case VIR_DRV_FEATURE_MIGRATION_V3:
> + return 1;
> + default:
> + return 0;
> + }
> +}
> +
> +
> +static char *
> +openvzDomainMigrateBegin3Params(virDomainPtr domain,
> + virTypedParameterPtr params,
> + int nparams,
> + char **cookieout ATTRIBUTE_UNUSED,
> + int *cookieoutlen ATTRIBUTE_UNUSED,
> + unsigned int flags)
> +{
> + virDomainObjPtr vm = NULL;
> + struct openvz_driver *driver = domain->conn->privateData;
> + char *xml = NULL;
> + int status;
> +
> + virCheckFlags(OPENVZ_MIGRATION_FLAGS, NULL);
> + if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0)
> + return NULL;
> +
> + openvzDriverLock(driver);
> + vm = virDomainObjListFindByUUID(driver->domains, domain->uuid);
> + openvzDriverUnlock(driver);
> +
> + if (!vm) {
> + virReportError(VIR_ERR_NO_DOMAIN, "%s",
> + _("no domain with matching uuid"));
> + goto cleanup;
> + }
> +
> + if (!virDomainObjIsActive(vm)) {
> + virReportError(VIR_ERR_OPERATION_INVALID,
> + "%s", _("domain is not running"));
> + goto cleanup;
> + }
> +
> + if (openvzGetVEStatus(vm, &status, NULL) == -1)
> + goto cleanup;
> +
> + if (status != VIR_DOMAIN_RUNNING) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("domain is not in running state"));
> + goto cleanup;
> + }
> +
> + xml = virDomainDefFormat(vm->def, VIR_DOMAIN_XML_SECURE);
> +
> + cleanup:
> + if (vm)
> + virObjectUnlock(vm);
> + return xml;
> +}
> +
> +static int
> +openvzDomainMigratePrepare3Params(virConnectPtr dconn,
> + virTypedParameterPtr params,
> + int nparams,
> + const char *cookiein ATTRIBUTE_UNUSED,
> + int cookieinlen ATTRIBUTE_UNUSED,
> + char **cookieout ATTRIBUTE_UNUSED,
> + int *cookieoutlen ATTRIBUTE_UNUSED,
> + char **uri_out,
> + unsigned int fflags ATTRIBUTE_UNUSED)
> +{
> + struct openvz_driver *driver = dconn->privateData;
> + const char *dom_xml = NULL;
> + const char *uri_in = NULL;
> + virDomainDefPtr def = NULL;
> + virDomainObjPtr vm = NULL;
> + char *hostname = NULL;
> + virURIPtr uri = NULL;
> + int ret = -1;
> +
> + if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0)
> + goto error;
> +
> + if (virTypedParamsGetString(params, nparams,
> + VIR_MIGRATE_PARAM_DEST_XML,
> + &dom_xml) < 0 ||
> + virTypedParamsGetString(params, nparams,
> + VIR_MIGRATE_PARAM_URI,
> + &uri_in) < 0)
> + goto error;
> +
> + if (!dom_xml) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("no domain XML passed"));
> + goto error;
> + }
> +
> + if (!(def = virDomainDefParseString(dom_xml, driver->caps, driver->xmlopt,
> + 1 << VIR_DOMAIN_VIRT_OPENVZ,
> + VIR_DOMAIN_XML_INACTIVE)))
> + goto error;
> +
> + if (!(vm = virDomainObjListAdd(driver->domains, def,
> + driver->xmlopt,
> + VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
> + VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
> + NULL)))
> + goto error;
> + def = NULL;
> +
> + if (!uri_in) {
> + if ((hostname = virGetHostname()) == NULL)
> + goto error;
> +
> + if (STRPREFIX(hostname, "localhost")) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("hostname on destination resolved to
> localhost,"
> + " but migration requires an FQDN"));
> + goto error;
> + }
> + } else {
> + uri = virURIParse(uri_in);
> +
> + if (uri == NULL) {
> + virReportError(VIR_ERR_INVALID_ARG,
> + _("unable to parse URI: %s"),
> + uri_in);
> + goto error;
> + }
> +
> + if (uri->server == NULL) {
> + virReportError(VIR_ERR_INVALID_ARG,
> + _("missing host in migration URI: %s"),
> + uri_in);
> + goto error;
> + } else {
> + hostname = uri->server;
> + }
> + }
> +
> + if (virAsprintf(uri_out, "tcp://%s", hostname) < 0)
> + goto error;
> +
> + ret = 0;
> + goto done;
> +
> + error:
> + virDomainDefFree(def);
> + if (vm) {
> + virDomainObjListRemove(driver->domains, vm);
> + vm = NULL;
> + }
> +
> + done:
> + virURIFree(uri);
> + if (vm)
> + virObjectUnlock(vm);
> + return ret;
> +}
> +
> +static int
> +openvzDomainMigratePerform3Params(virDomainPtr domain,
> + const char *dconnuri ATTRIBUTE_UNUSED,
> + virTypedParameterPtr params,
> + int nparams,
> + const char *cookiein ATTRIBUTE_UNUSED,
> + int cookieinlen ATTRIBUTE_UNUSED,
> + char **cookieout ATTRIBUTE_UNUSED,
> + int *cookieoutlen ATTRIBUTE_UNUSED,
> + unsigned int flags)
> +{
> + struct openvz_driver *driver = domain->conn->privateData;
> + virDomainObjPtr vm = NULL;
> + const char *uri_str = NULL;
> + virURIPtr uri = NULL;
> + virCommandPtr cmd = virCommandNew(VZMIGRATE);
> + int ret = -1;
> +
> + virCheckFlags(OPENVZ_MIGRATION_FLAGS, -1);
> + if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0)
> + goto cleanup;
> +
> + if (virTypedParamsGetString(params, nparams,
> + VIR_MIGRATE_PARAM_URI,
> + &uri_str) < 0)
> + goto cleanup;
> +
> + openvzDriverLock(driver);
> + vm = virDomainObjListFindByUUID(driver->domains, domain->uuid);
> + openvzDriverUnlock(driver);
> +
> + if (!vm) {
> + virReportError(VIR_ERR_NO_DOMAIN, "%s",
> + _("no domain with matching uuid"));
> + goto cleanup;
> + }
> +
> + /* parse dst host:port from uri */
> + uri = virURIParse(uri_str);
> + if (uri == NULL || uri->server == NULL)
> + goto cleanup;
> +
> + if (flags & VIR_MIGRATE_LIVE)
> + virCommandAddArg(cmd, "--live");
> + virCommandAddArg(cmd, uri->server);
> + virCommandAddArg(cmd, vm->def->name);
> +
> + if (virCommandRun(cmd, NULL) < 0)
> + goto cleanup;
> +
> + ret = 0;
> +
> + cleanup:
> + virCommandFree(cmd);
> + virURIFree(uri);
> + if (vm)
> + virObjectUnlock(vm);
> + return ret;
> +}
> +
> +static virDomainPtr
> +openvzDomainMigrateFinish3Params(virConnectPtr dconn,
> + virTypedParameterPtr params,
> + int nparams,
> + const char *cookiein ATTRIBUTE_UNUSED,
> + int cookieinlen ATTRIBUTE_UNUSED,
> + char **cookieout ATTRIBUTE_UNUSED,
> + int *cookieoutlen ATTRIBUTE_UNUSED,
> + unsigned int flags,
> + int cancelled)
> +{
> + struct openvz_driver *driver = dconn->privateData;
> + virDomainObjPtr vm = NULL;
> + const char *dname = NULL;
> + virDomainPtr dom = NULL;
> + int status;
> +
> + if (cancelled)
> + goto cleanup;
> +
> + virCheckFlags(OPENVZ_MIGRATION_FLAGS, NULL);
> + if (virTypedParamsValidate(params, nparams, OPENVZ_MIGRATION_PARAMETERS) < 0)
> + goto cleanup;
> +
> + if (virTypedParamsGetString(params, nparams,
> + VIR_MIGRATE_PARAM_DEST_NAME,
> + &dname) < 0)
> + goto cleanup;
> +
> + if (!dname ||
> + !(vm = virDomainObjListFindByName(driver->domains, dname))) {
> + /* Migration obviously failed if the domain doesn't exist */
> + virReportError(VIR_ERR_OPERATION_FAILED,
> + _("Migration failed. No domain on destination host
> "
> + "with matching name '%s'"),
> + NULLSTR(dname));
> + goto cleanup;
> + }
> +
> + if (openvzGetVEStatus(vm, &status, NULL) == -1)
> + goto cleanup;
> +
> + if (status != VIR_DOMAIN_RUNNING) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("domain is not running on destination host"));
> + goto cleanup;
> + }
> +
> + vm->def->id = strtoI(vm->def->name);
> + virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, VIR_DOMAIN_RUNNING_MIGRATED);
> +
> + dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
> + if (dom)
> + dom->id = vm->def->id;
> +
> + cleanup:
> + if (vm)
> + virObjectUnlock(vm);
> + return dom;
> +}
> +
> +static int
> +openvzDomainMigrateConfirm3Params(virDomainPtr domain,
> + virTypedParameterPtr params,
> + int nparams,
> + const char *cookiein ATTRIBUTE_UNUSED,
> + int cookieinlen ATTRIBUTE_UNUSED,
> + unsigned int flags,
> + int cancelled)
> +{
> + struct openvz_driver *driver = domain->conn->privateData;
> + virDomainObjPtr vm = NULL;
> + int status;
> + int ret = -1;
> +
> + virCheckFlags(OPENVZ_MIGRATION_FLAGS, -1);
> + if (virTypedParamsValidate(params, nparams,
> OPENVZ_MIGRATION_PARAMETERS) < 0)
> + goto cleanup;
> +
> + openvzDriverLock(driver);
> + vm = virDomainObjListFindByUUID(driver->domains, domain->uuid);
> + openvzDriverUnlock(driver);
> +
> + if (!vm) {
> + virReportError(VIR_ERR_NO_DOMAIN, "%s",
> + _("no domain with matching uuid"));
> + goto cleanup;
> + }
> +
> + if (cancelled) {
> + if (openvzGetVEStatus(vm, &status, NULL) == -1)
> + goto cleanup;
> +
> + if (status == VIR_DOMAIN_RUNNING) {
> + ret = 0;
> + } else {
> + VIR_DEBUG("Domain '%s' does not recover after failed
> migration",
> + vm->def->name);
> + }
> +
> + goto cleanup;
> + }
> +
> + vm->def->id = -1;
> +
> + VIR_DEBUG("Domain '%s' successfully migrated", vm->def->name);
> +
> + virDomainObjListRemove(driver->domains, vm);
> + vm = NULL;
> +
> + ret = 0;
> +
> + cleanup:
> + if (vm)
> + virObjectUnlock(vm);
> + return ret;
> +}
> +
> +
> static virDriver openvzDriver = {
> .no = VIR_DRV_OPENVZ,
> .name = "OPENVZ",
> @@ -2265,6 +2607,12 @@ static virDriver openvzDriver = {
> .connectIsAlive = openvzConnectIsAlive, /* 0.9.8 */
> .domainUpdateDeviceFlags = openvzDomainUpdateDeviceFlags, /* 0.9.13 */
> .domainGetHostname = openvzDomainGetHostname, /* 0.10.0 */
> + .connectSupportsFeature = openvzConnectSupportsFeature, /* 1.2.8 */
> + .domainMigrateBegin3Params = openvzDomainMigrateBegin3Params, /* 1.2.8 */
> + .domainMigratePrepare3Params = openvzDomainMigratePrepare3Params, /* 1.2.8 */
> + .domainMigratePerform3Params = openvzDomainMigratePerform3Params, /* 1.2.8 */
> + .domainMigrateFinish3Params = openvzDomainMigrateFinish3Params, /* 1.2.8 */
> + .domainMigrateConfirm3Params = openvzDomainMigrateConfirm3Params, /* 1.2.8 */
> };
>
> int openvzRegister(void)
> diff --git a/src/openvz/openvz_driver.h b/src/openvz/openvz_driver.h
> index b39e81c..0c7a070 100644
> --- a/src/openvz/openvz_driver.h
> +++ b/src/openvz/openvz_driver.h
> @@ -31,6 +31,16 @@
>
> # include "internal.h"
>
> +# define OPENVZ_MIGRATION_FLAGS \
> + (VIR_MIGRATE_LIVE)
> +
> +/* All supported migration parameters and their types. */
> +# define OPENVZ_MIGRATION_PARAMETERS \
> + VIR_MIGRATE_PARAM_URI, VIR_TYPED_PARAM_STRING, \
> + VIR_MIGRATE_PARAM_DEST_NAME, VIR_TYPED_PARAM_STRING, \
> + VIR_MIGRATE_PARAM_DEST_XML, VIR_TYPED_PARAM_STRING, \
> + NULL
> +
> int openvzRegister(void);
>
> #endif
> --
> 1.7.1
>
>
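As a usage sketch for the migration support added above: a client would drive
these new callbacks through the typed-params migration API roughly as follows.
The container name, destination connection URI and host below are made-up
examples, not anything mandated by the patch:

/* Sketch only: live-migrate an OpenVZ container via the *3Params
 * migration phases this patch implements.  Names/URIs are examples. */
#include <libvirt/libvirt.h>

int migrate_ct(virConnectPtr src)
{
    virConnectPtr dst = virConnectOpen("openvz+ssh://dest.example.com/system");
    virDomainPtr dom = virDomainLookupByName(src, "101");
    virDomainPtr ddom = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0, maxparams = 0, ret = -1;

    if (!dst || !dom)
        goto cleanup;

    if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                VIR_MIGRATE_PARAM_URI,
                                "tcp://dest.example.com") < 0)
        goto cleanup;

    /* Runs the driver's Begin/Prepare/Perform/Finish/Confirm callbacks. */
    ddom = virDomainMigrate3(dom, dst, params, nparams, VIR_MIGRATE_LIVE);
    if (ddom) {
        ret = 0;
        virDomainFree(ddom);
    }

 cleanup:
    virTypedParamsFree(params, nparams);
    if (dom)
        virDomainFree(dom);
    if (dst)
        virConnectClose(dst);
    return ret;
}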
[libvirt] [PATCH] apparmor: allow reading cap_last_cap
by Felix Geyer
libcap-ng >= 0.7.4 fails when it can't read /proc/sys/kernel/cap_last_cap,
and thus running a qemu guest fails.
Allow reading cap_last_cap in the libvirt-qemu apparmor abstraction.
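For background, the probe that hits this denial is roughly equivalent to the
following simplified sketch (not libcap-ng's actual code):

/* Sketch only: read the highest capability number the kernel knows about.
 * The fopen() below is the access the apparmor profile has to allow. */
#include <stdio.h>

static int get_last_cap(void)
{
    FILE *f = fopen("/proc/sys/kernel/cap_last_cap", "r");
    int last = -1;

    if (!f)
        return -1;
    if (fscanf(f, "%d", &last) != 1)
        last = -1;
    fclose(f);
    return last;
}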
---
examples/apparmor/libvirt-qemu | 1 +
1 file changed, 1 insertion(+)
diff --git a/examples/apparmor/libvirt-qemu b/examples/apparmor/libvirt-qemu
index 83814ec..1a98182 100644
--- a/examples/apparmor/libvirt-qemu
+++ b/examples/apparmor/libvirt-qemu
@@ -21,6 +21,7 @@
/dev/ptmx rw,
/dev/kqemu rw,
@{PROC}/*/status r,
+ @{PROC}/sys/kernel/cap_last_cap r,
# For hostdev access. The actual devices will be added dynamically
/sys/bus/usb/devices/ r,
--
2.1.0
[libvirt] [PATCH 0/4] Introduce new cputune event
by Pavel Hrdina
This patch series introduces a new cputune event to inform
management applications about every change of cputune values
for running domains.
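For illustration, a management application would consume such an event through
the generic event API along these lines; the event ID and the plain callback
signature below are hypothetical placeholders, since the real names and payload
are defined by this series:

/* Sketch only: register for cputune-change notifications.
 * VIR_DOMAIN_EVENT_ID_CPUTUNE and the simple callback signature are
 * placeholders, not names guaranteed by this series. */
#include <stdio.h>
#include <libvirt/libvirt.h>

static void cputune_cb(virConnectPtr conn, virDomainPtr dom, void *opaque)
{
    (void)conn;
    (void)opaque;
    printf("cputune values changed for domain %s\n", virDomainGetName(dom));
}

static int register_cputune_event(virConnectPtr conn)
{
    return virConnectDomainEventRegisterAny(conn, NULL,
                                            VIR_DOMAIN_EVENT_ID_CPUTUNE,
                                            VIR_DOMAIN_EVENT_CALLBACK(cputune_cb),
                                            NULL, NULL);
}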
Pavel Hrdina (4):
domain_conf: separate cputune struct from virDomainDef
event: introduce new event for cputune
add an example how to use cputune event
cputune_event: queue the event for cputune updates
daemon/remote.c | 87 ++++++++++++++++++++++++++
examples/object-events/event-test.c | 39 +++++++++++-
include/libvirt/libvirt.h.in | 62 +++++++++++++++++++
src/conf/domain_conf.h | 27 ++++----
src/conf/domain_event.c | 120 ++++++++++++++++++++++++++++++++++++
src/conf/domain_event.h | 7 +++
src/libvirt_private.syms | 2 +
src/qemu/qemu_cgroup.c | 6 ++
src/qemu/qemu_driver.c | 27 ++++++++
src/remote/remote_driver.c | 110 +++++++++++++++++++++++++++++++++
src/remote/remote_protocol.x | 39 +++++++++++-
src/remote_protocol-structs | 32 ++++++++++
tools/virsh-domain.c | 49 +++++++++++++++
13 files changed, 594 insertions(+), 13 deletions(-)
--
1.8.5.5
[libvirt] [PATCH] tests: force FIPS testing mode with new enough GNU TLS versions
by Giuseppe Scrivano
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
---
tests/virnettlscontexttest.c | 2 ++
tests/virnettlssessiontest.c | 2 ++
2 files changed, 4 insertions(+)
diff --git a/tests/virnettlscontexttest.c b/tests/virnettlscontexttest.c
index 51a0369..a3e24a3 100644
--- a/tests/virnettlscontexttest.c
+++ b/tests/virnettlscontexttest.c
@@ -113,6 +113,8 @@ mymain(void)
{
int ret = 0;
+ setenv("GNUTLS_FORCE_FIPS_MODE", "2", 1);
+
testTLSInit(KEYFILE);
# define DO_CTX_TEST(_isServer, _caCrt, _crt, _expectFail) \
diff --git a/tests/virnettlssessiontest.c b/tests/virnettlssessiontest.c
index 1e2683c..3af948a 100644
--- a/tests/virnettlssessiontest.c
+++ b/tests/virnettlssessiontest.c
@@ -240,6 +240,8 @@ mymain(void)
{
int ret = 0;
+ setenv("GNUTLS_FORCE_FIPS_MODE", "2", 1);
+
testTLSInit(KEYFILE);
# define DO_SESS_TEST(_caCrt, _serverCrt, _clientCrt, _expectServerFail, \
--
1.9.3
[libvirt] [PATCH] selinux: Avoid label reservations for type = none during restart
by Shivaprasad G Bhat
The problem is that libvirt kills guests during a libvirt restart if more than
one guest has security type 'none'. This is because libvirt, as part of guest
reconnect, tries to reserve the security labels. In the case of type=none, the
security context range happens to be the same for several guests. During
reservation, the second attempt to reserve the same range fails and the guests
are killed. The fix is to avoid reserving labels for type=none during libvirt
restart.
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.vnet.ibm.com>
---
src/security/security_selinux.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/security/security_selinux.c b/src/security/security_selinux.c
index 008c58c..2f8a7f2 100644
--- a/src/security/security_selinux.c
+++ b/src/security/security_selinux.c
@@ -739,7 +739,8 @@ virSecuritySELinuxReserveSecurityLabel(virSecurityManagerPtr mgr,
virSecurityLabelDefPtr seclabel;
seclabel = virDomainDefGetSecurityLabelDef(def, SECURITY_SELINUX_NAME);
- if (!seclabel || seclabel->type == VIR_DOMAIN_SECLABEL_STATIC)
+ if (!seclabel || seclabel->type == VIR_DOMAIN_SECLABEL_STATIC ||
+ seclabel->type == VIR_DOMAIN_SECLABEL_NONE)
return 0;
if (getpidcon_raw(pid, &pctx) == -1) {
[libvirt] [PATCH] security: fix DH key generation when FIPS mode is on
by Giuseppe Scrivano
When FIPS mode is on, gnutls_dh_params_generate2 will fail if 1024 is
specified as the prime's number of bits; a bigger value works in both
cases.
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
---
with the development version of GNU TLS it is possible to test FIPS mode by
setting the env variable GNUTLS_FORCE_FIPS_MODE=2
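For reference, the generation step in question boils down to the following;
only the 2048-bit size comes from the patch, the surrounding setup is an
illustrative standalone sketch:

/* Sketch only: generate DH parameters with a prime size that also
 * works when GNU TLS runs in FIPS mode. */
#include <gnutls/gnutls.h>

static int generate_dh_params(gnutls_dh_params_t *dh_params)
{
    int err;

    if ((err = gnutls_dh_params_init(dh_params)) < 0)
        return err;

    /* 1024 bits fails under FIPS mode; 2048 works in both modes. */
    if ((err = gnutls_dh_params_generate2(*dh_params, 2048)) < 0) {
        gnutls_dh_params_deinit(*dh_params);
        *dh_params = NULL;
    }
    return err;
}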
src/rpc/virnettlscontext.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/rpc/virnettlscontext.c b/src/rpc/virnettlscontext.c
index 31aac9d..947038d 100644
--- a/src/rpc/virnettlscontext.c
+++ b/src/rpc/virnettlscontext.c
@@ -43,7 +43,7 @@
#include "virthread.h"
#include "configmake.h"
-#define DH_BITS 1024
+#define DH_BITS 2048
#define LIBVIRT_PKI_DIR SYSCONFDIR "/pki"
#define LIBVIRT_CACERT LIBVIRT_PKI_DIR "/CA/cacert.pem"
--
1.9.3
[libvirt] [PATCH] qemu: numatune/domiftune no support in session mode
by Erik Skultety
Tuning NUMA or network interface parameters requires root privileges, so an
attempt to set any of these parameters in session mode should be rejected
with an error. Memory tuning, for example, already raises an error in that
case. This patch provides similar behavior for numatune and domiftune.
Resolves https://bugzilla.redhat.com/show_bug.cgi?id=1126762
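To illustrate the enforced behavior, a client connected to qemu:///session
attempting something like the sketch below should now get an explicit error
instead of a silently ineffective setting; the domain name and nodeset are
just examples:

/* Sketch only: try to pin a running domain's memory to NUMA node 0 over a
 * session connection; with this patch the call is rejected with an error. */
#include <libvirt/libvirt.h>

int try_numatune(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///session");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0, maxparams = 0, ret = -1;

    if (!dom)
        goto cleanup;

    if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                VIR_DOMAIN_NUMA_NODESET, "0") < 0)
        goto cleanup;

    /* Expected to fail in session mode after this patch. */
    ret = virDomainSetNumaParameters(dom, params, nparams,
                                     VIR_DOMAIN_AFFECT_LIVE);

 cleanup:
    virTypedParamsFree(params, nparams);
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return ret;
}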
---
src/qemu/qemu_command.c | 33 ++++++++++++++++++++++++++++++++-
src/qemu/qemu_driver.c | 20 ++++++++++++++++++++
2 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index c84c7c3..c021080 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -7443,7 +7443,7 @@ qemuBuildCommandLine(virConnectPtr conn,
emulator = def->emulator;
if (!cfg->privileged) {
- /* If we have no cgroups than we can have no tunings that
+ /* If we have no cgroups then we can have no tunings that
* require them */
if (def->mem.hard_limit || def->mem.soft_limit ||
@@ -7466,6 +7466,37 @@ qemuBuildCommandLine(virConnectPtr conn,
_("CPU tuning is not available in session mode"));
goto error;
}
+
+ if (virDomainNumatuneGetNodeset(def->numatune, NULL, -1) ||
+ virDomainNumatuneGetMode(def->numatune, -1)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("NUMA tuning is not available in session mode"));
+ goto error;
+ }
+
+ virDomainNetDefPtr *nets = def->nets;
+ size_t nnets = def->nnets;
+ for (i = 0; i < nnets; i++) {
+ if (nets[i]->bandwidth) {
+ if (nets[i]->bandwidth->in &&
+ (nets[i]->bandwidth->in->average ||
+ nets[i]->bandwidth->in->peak ||
+ nets[i]->bandwidth->in->burst)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Network bandwidth tuning is not available in session mode"));
+ goto error;
+ }
+
+ if (nets[i]->bandwidth->out &&
+ (nets[i]->bandwidth->out->average ||
+ nets[i]->bandwidth->out->peak ||
+ nets[i]->bandwidth->out->burst)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Network bandwidth tuning is not available in session mode"));
+ goto error;
+ }
+ }
+ }
}
for (i = 0; i < def->ngraphics; ++i) {
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 239a300..b46e12f 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -8779,6 +8779,12 @@ qemuDomainSetNumaParameters(virDomainPtr dom,
if (virDomainSetNumaParametersEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
+ if (!cfg->privileged) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("NUMA tuning is not available in session mode"));
+ goto cleanup;
+ }
+
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
@@ -8870,6 +8876,7 @@ qemuDomainGetNumaParameters(virDomainPtr dom,
size_t i;
virDomainObjPtr vm = NULL;
virDomainDefPtr persistentDef = NULL;
+ virQEMUDriverConfigPtr cfg = NULL;
char *nodeset = NULL;
int ret = -1;
virCapsPtr caps = NULL;
@@ -8888,10 +8895,17 @@ qemuDomainGetNumaParameters(virDomainPtr dom,
return -1;
priv = vm->privateData;
+ cfg = virQEMUDriverGetConfig(driver);
if (virDomainGetNumaParametersEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
+ if (!cfg->privileged) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("NUMA tuning is not available in session mode"));
+ goto cleanup;
+ }
+
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
@@ -9889,6 +9903,12 @@ qemuDomainSetInterfaceParameters(virDomainPtr dom,
if (virDomainSetInterfaceParametersEnsureACL(dom->conn, vm->def, flags) < 0)
goto cleanup;
+ if (!cfg->privileged) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("Network bandwidth tuning is not available in session mode"));
+ goto cleanup;
+ }
+
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
--
1.9.3
[libvirt] [PATCH 0/6] Coverity patches to resolve RESOURCE_LEAK
by Wang Rui
Another six patches to fix resource leaks.
But this may not be the end.
Wang Rui (6):
tests: Resolve Coverity RESOURCE_LEAK in commandhelper
test_conf: Resolve Coverity RESOURCE_LEAK
remote: Resolve Coverity RESOURCE_LEAK
qemu_process: Resolve Coverity RESOURCE_LEAK
vircgroup: Resolve Coverity RESOURCE_LEAK
lxc_container: Resolve Coverity RESOURCE_LEAK
daemon/remote.c | 4 +++-
src/lxc/lxc_container.c | 4 ++++
src/qemu/qemu_process.c | 1 +
src/util/vircgroup.c | 2 +-
tests/commandhelper.c | 15 +++++++++------
tests/test_conf.c | 4 ++--
6 files changed, 20 insertions(+), 10 deletions(-)
--
1.7.12.4