[libvirt] [PATCH] qemu: Drop qemuDomainMemoryLimit
by Michal Privoznik
This function tries to guess the correct limit for maximal memory
usage by qemu for a given domain. The limit can never be guessed
correctly, not to mention all the pains and sleepless nights this
code has caused. Once somebody discovers an algorithm that solves
the Halting Problem, we can compute the limit algorithmically. But
until then, this code should never see the light of a release
again.
---
src/qemu/qemu_cgroup.c | 3 +--
src/qemu/qemu_command.c | 2 +-
src/qemu/qemu_domain.c | 49 -------------------------------------------------
src/qemu/qemu_domain.h | 2 --
src/qemu/qemu_hotplug.c | 2 +-
5 files changed, 3 insertions(+), 55 deletions(-)
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index dc949db..9673e8e 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -428,8 +428,7 @@ qemuSetupMemoryCgroup(virDomainObjPtr vm)
}
}
- if (virCgroupSetMemoryHardLimit(priv->cgroup,
- qemuDomainMemoryLimit(vm->def)) < 0)
+ if (virCgroupSetMemoryHardLimit(priv->cgroup, vm->def->mem.hard_limit) < 0)
return -1;
if (vm->def->mem.soft_limit != 0 &&
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index b811e1d..a0a1773 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -9220,7 +9220,7 @@ qemuBuildCommandLine(virConnectPtr conn,
}
if (mlock)
- virCommandSetMaxMemLock(cmd, qemuDomainMemoryLimit(def) * 1024);
+ virCommandSetMaxMemLock(cmd, def->mem.hard_limit * 1024);
virObjectUnref(cfg);
return cmd;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 393af6b..7f4d17d 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2306,55 +2306,6 @@ cleanup:
return ret;
}
-
-unsigned long long
-qemuDomainMemoryLimit(virDomainDefPtr def)
-{
- unsigned long long mem;
- size_t i;
-
- if (def->mem.hard_limit) {
- mem = def->mem.hard_limit;
- } else {
- /* If there is no hard_limit set, compute a reasonable one to avoid
- * system thrashing caused by exploited qemu. A 'reasonable
- * limit' has been chosen:
- * (1 + k) * (domain memory + total video memory) + (32MB for
- * cache per each disk) + F
- * where k = 0.5 and F = 400MB. The cache for disks is important as
- * kernel cache on the host side counts into the RSS limit.
- * Moreover, VFIO requires some amount for IO space. Alex Williamson
- * suggested adding 1GiB for IO space just to be safe (some finer
- * tuning might be nice, though).
- *
- * Technically, the disk cache does not have to be included in
- * RLIMIT_MEMLOCK but it doesn't hurt as it's just an upper limit and
- * it makes this function and its usage simpler.
- */
- mem = def->mem.max_balloon;
- for (i = 0; i < def->nvideos; i++)
- mem += def->videos[i]->vram;
- mem *= 1.5;
- mem += def->ndisks * 32768;
- mem += 409600;
-
- for (i = 0; i < def->nhostdevs; i++) {
- virDomainHostdevDefPtr hostdev = def->hostdevs[i];
- if (hostdev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS &&
- hostdev->source.subsys.type ==
- VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI &&
- hostdev->source.subsys.u.pci.backend ==
- VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO) {
- mem += 1024 * 1024;
- break;
- }
- }
- }
-
- return mem;
-}
-
-
int
qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
virDomainObjPtr vm)
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 0a4a51e..21f116c 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -365,8 +365,6 @@ extern virDomainXMLPrivateDataCallbacks virQEMUDriverPrivateDataCallbacks;
extern virDomainXMLNamespace virQEMUDriverDomainXMLNamespace;
extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig;
-unsigned long long qemuDomainMemoryLimit(virDomainDefPtr def);
-
int qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
virDomainObjPtr vm);
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index c9748d9..fa64dd7 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -1030,7 +1030,7 @@ int qemuDomainAttachHostPciDevice(virQEMUDriverPtr driver,
*/
vm->def->hostdevs[vm->def->nhostdevs++] = hostdev;
virProcessSetMaxMemLock(vm->pid,
- qemuDomainMemoryLimit(vm->def) * 1024);
+ vm->def->mem.hard_limit * 1024);
vm->def->hostdevs[vm->def->nhostdevs--] = NULL;
}
--
1.8.1.5
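For a sense of what the dropped heuristic actually produced, here is a
minimal standalone sketch of the removed formula applied to a made-up
domain (4 GiB of RAM, one 16 MiB VGA, two disks, one VFIO hostdev);
all sizes are in KiB, matching the original code:

/* Standalone sketch of the heuristic removed above:
 * 1.5 * (domain memory + total video RAM) + 32 MiB per disk + 400 MiB,
 * plus 1 GiB of IO space when a VFIO hostdev is present. */
#include <stdio.h>

int main(void)
{
    unsigned long long mem = 4ULL * 1024 * 1024; /* 4 GiB guest, in KiB */
    mem += 16 * 1024;                            /* one 16 MiB VGA */
    mem *= 1.5;                                  /* the k = 0.5 fudge factor */
    mem += 2 * 32768;                            /* two disks, 32 MiB cache each */
    mem += 409600;                               /* F = 400 MiB */
    mem += 1024 * 1024;                          /* VFIO IO space, 1 GiB */
    printf("guessed hard limit: %llu KiB\n", mem); /* 7839744 KiB, ~7.5 GiB */
    return 0;
}

A 4 GiB guest thus ended up with a roughly 7.5 GiB hard limit, exactly the
kind of one-size-fits-all guess the patch argues cannot be right in general.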
[libvirt] [PATCHv3 0/3] Fix two issues when DBus isn't enabled or compiled in
by Peter Krempa
Fix a regression that prevented VMs from starting, and an inability to start
libvirtd when firewalld support is compiled in.
Changes since v2:
virDBusHasSystemBus() is now a bool function, with its callers adapted accordingly.
Peter Krempa (3):
virdbus: Add virDBusHasSystemBus()
virsystemd: Don't fail to start VM if DBus isn't available or compiled
in
nwfilter: Don't fail to start if DBus isn't available
src/libvirt_private.syms | 1 +
src/nwfilter/nwfilter_driver.c | 9 +++++++--
src/util/virdbus.c | 33 ++++++++++++++++++++++++++++++---
src/util/virdbus.h | 1 +
src/util/virsystemd.c | 6 ++++--
5 files changed, 43 insertions(+), 7 deletions(-)
--
1.8.3.2
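The cover letter does not show the new helper itself; as a rough idea of the
shape such a probe takes, here is a minimal sketch against raw libdbus (not
the actual libvirt implementation, which reuses the cached bus connection in
src/util/virdbus.c):

/* Sketch only: report whether the DBus system bus is reachable,
 * so callers can fall back instead of failing hard. */
#include <stdbool.h>
#include <dbus/dbus.h>

bool
exampleHasSystemBus(void)
{
    DBusError err;
    DBusConnection *conn;

    dbus_error_init(&err);
    conn = dbus_bus_get(DBUS_BUS_SYSTEM, &err);
    if (!conn) {
        dbus_error_free(&err);   /* no bus: caller uses its fallback */
        return false;
    }
    return true;
}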
[libvirt] [PATCH] virbitmaptest: Shut coverity up in case of broken test
by Peter Krempa
Coverity reported a memleak in the test added in 7efd5fd1b02. If the
code were broken and actually parsed a faulty bitmap, the resulting
pointer would be leaked. Free it, although that should never happen.
---
tests/virbitmaptest.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/virbitmaptest.c b/tests/virbitmaptest.c
index c56d6fa..e00b0a0 100644
--- a/tests/virbitmaptest.c
+++ b/tests/virbitmaptest.c
@@ -492,6 +492,7 @@ test9(const void *opaque ATTRIBUTE_UNUSED)
ret = 0;
cleanup:
+ virBitmapFree(bitmap);
return ret;
}
--
1.8.3.2
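The one-liner leans on libvirt's usual cleanup-label idiom: every exit path
funnels through one label, and the free functions tolerate NULL. A condensed,
self-contained sketch of the pattern (hypothetical names, not the actual
test9 body):

/* bitmap_parse()/bitmap_free() are made-up stand-ins for
 * virBitmapParse()/virBitmapFree(); the point is the shape. */
#include <stdlib.h>
#include <string.h>

struct bitmap { unsigned long bits; };

static struct bitmap *
bitmap_parse(const char *str)
{
    if (strspn(str, "0123456789,-") != strlen(str))
        return NULL;                 /* reject garbage input */
    return calloc(1, sizeof(struct bitmap));
}

static void
bitmap_free(struct bitmap *b)
{
    free(b);                         /* NULL-safe, like virBitmapFree */
}

static int
test_faulty_input(const char *str)
{
    struct bitmap *bitmap = NULL;
    int ret = -1;

    if ((bitmap = bitmap_parse(str)) != NULL)
        goto cleanup;                /* parsing garbage must fail */

    ret = 0;
 cleanup:
    bitmap_free(bitmap);             /* the free Coverity asked for */
    return ret;
}

int main(void) { return test_faulty_input("not-a-bitmap!"); }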
[libvirt] [PATCH] docs: Discourage users from setting hard_limit
by Michal Privoznik
One of my previous patches removes the hard_limit heuristic that
guessed a value when none was set. However, it turns out this limit is
hard to guess even for users. We should advise them not to set the
limit at all, as their domains may get OOM killed. Sigh.
---
docs/formatdomain.html.in | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/docs/formatdomain.html.in b/docs/formatdomain.html.in
index 83d551a..93234a4 100644
--- a/docs/formatdomain.html.in
+++ b/docs/formatdomain.html.in
@@ -676,7 +676,10 @@
<dt><code>hard_limit</code></dt>
<dd> The optional <code>hard_limit</code> element is the maximum memory
the guest can use. The units for this value are kibibytes (i.e. blocks
- of 1024 bytes)</dd>
+ of 1024 bytes). <strong>However, users are strongly advised not to set
+ this limit as the domain may get killed by the kernel. Determining the
+ memory needed for a process to run is an <a href="http://en.wikipedia.org/wiki/Undecidable_problem">
+ undecidable problem</a>.</strong></dd>
<dt><code>soft_limit</code></dt>
<dd> The optional <code>soft_limit</code> element is the memory limit to
enforce during memory contention. The units for this value are
--
1.8.1.5
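For reference, the element under discussion lives under <memtune> in the
domain XML; a minimal snippet (the value is an arbitrary example, in KiB):

<memtune>
  <hard_limit>9437184</hard_limit>  <!-- 9 GiB; arbitrary example value -->
</memtune>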
[libvirt] virtlockd max_clients limitation
by David Weber
Hi,
we recently ran into a problem when trying to start more than 20 guests if
direct locking with virtlockd was enabled. The error message looked like this:
# start test7
error: Failed to start domain test7
error: Cannot recv data: Connection reset by peer
Our research indicated there is a max_clients parameter set in src/locking/lock_daemon.c which causes the limitation.
Simply increasing this number works as a workaround.
I don't have a deep understanding of the code, so I don't know whether there is a
bug and the connection should be dropped after the lock is set, or whether
max_clients really is the way to go. If the latter, I guess max_clients should
be configurable (as it is in libvirtd) or at least be higher than 20 :)
Libvirt version 1.1.1
Cheers,
David
From 5187231a973a9956723683c7fad14c8bb3dfcac2 Mon Sep 17 00:00:00 2001
From: David Weber <wb(a)munzinger.de>
Date: Sun, 18 Aug 2013 16:16:36 +0200
Subject: [PATCH] Increase virtlockd max clients
---
src/locking/lock_daemon.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/locking/lock_daemon.c b/src/locking/lock_daemon.c
index c4c1727..d96d0fc 100644
--- a/src/locking/lock_daemon.c
+++ b/src/locking/lock_daemon.c
virLockDaemonNew(bool privileged)
return NULL;
}
- if (!(lockd->srv = virNetServerNew(1, 1, 0, 20,
+ if (!(lockd->srv = virNetServerNew(1, 1, 0, 100,
-1, 0,
false, NULL,
virLockDaemonClientNew,
--
1.8.1.2
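If configurability is the way to go, the change would roughly amount to
resolving the limit at startup and falling back to the old default. A purely
hypothetical sketch (the environment variable stands in for real config-file
parsing; none of this is the eventual upstream solution):

/* Hypothetical: resolve max_clients at startup, defaulting to the
 * historical hard-coded value; the result would replace the literal
 * passed as the fourth argument to virNetServerNew(). */
#include <stdlib.h>

enum { LOCK_DAEMON_MAX_CLIENTS_DEFAULT = 20 };

static unsigned int
lockDaemonMaxClients(void)
{
    const char *cfg = getenv("VIRTLOCKD_MAX_CLIENTS"); /* config stand-in */
    long value = cfg ? strtol(cfg, NULL, 10) : 0;

    if (value <= 0)
        return LOCK_DAEMON_MAX_CLIENTS_DEFAULT;
    return (unsigned int)value;
}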
[libvirt] [PATCH glib] Remove dead cleanup code in object fetch_list helpers
by Daniel P. Berrange
From: "Daniel P. Berrange" <berrange(a)redhat.com>
The fetch_list helper's cleanup code iterates over the
elements in the 'lst' array, freeing each one. This is dead
code, however, since the only way to get there is from
codepaths which do not populate the 'lst' elements.
This fixes two Coverity DEADCODE reports.
Signed-off-by: Daniel P. Berrange <berrange(a)redhat.com>
---
libvirt-gobject/libvirt-gobject-connection.c | 6 +-----
libvirt-gobject/libvirt-gobject-storage-pool.c | 6 +-----
2 files changed, 2 insertions(+), 10 deletions(-)
diff --git a/libvirt-gobject/libvirt-gobject-connection.c b/libvirt-gobject/libvirt-gobject-connection.c
index 5c8eb16..687a185 100644
--- a/libvirt-gobject/libvirt-gobject-connection.c
+++ b/libvirt-gobject/libvirt-gobject-connection.c
@@ -719,11 +719,7 @@ static gchar ** fetch_list(virConnectPtr vconn,
return lst;
error:
- if (lst != NULL) {
- for (i = 0 ; i < n; i++)
- g_free(lst[i]);
- g_free(lst);
- }
+ g_free(lst);
return NULL;
}
diff --git a/libvirt-gobject/libvirt-gobject-storage-pool.c b/libvirt-gobject/libvirt-gobject-storage-pool.c
index e02adc8..aa27872 100644
--- a/libvirt-gobject/libvirt-gobject-storage-pool.c
+++ b/libvirt-gobject/libvirt-gobject-storage-pool.c
@@ -347,11 +347,7 @@ static gchar ** fetch_list(virStoragePoolPtr vpool,
return lst;
error:
- if (lst != NULL) {
- for (i = 0 ; i < n; i++)
- g_free(lst[i]);
- g_free(lst);
- }
+ g_free(lst);
return NULL;
}
--
1.8.3.1
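To see why the per-element loop was unreachable, it helps to sketch the
control flow the commit message describes (a condensed hypothetical shape,
not the real fetch_list()):

/* Every jump to 'error' happens before any lst[i] is populated, so a
 * loop freeing the elements there could never run over live pointers;
 * only the array itself may need freeing. */
#include <glib.h>

static gchar **
example_fetch_list(gssize (*count_fn)(void),
                   gssize (*fill_fn)(gchar **, gssize))
{
    gchar **lst = NULL;
    gssize n;

    if ((n = count_fn()) < 0)
        goto error;                  /* lst is still NULL here */

    lst = g_new0(gchar *, n + 1);
    if (fill_fn(lst, n) < 0)
        goto error;                  /* failed before storing strings */

    return lst;

 error:
    g_free(lst);                     /* element loop would be dead code */
    return NULL;
}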
[libvirt] [PATCHv2 0/3] Fix two DBus related failures
by Peter Krempa
Some places in the code assumed that DBus was either compiled in
or running:
1: the systemd cgroup code failed to start a VM on hosts without DBus installed or running
2: the nwfilter driver failed to initialize on hosts without DBus running
This series fixes those issues, as we have fallbacks we can use when DBus
isn't available.
Peter Krempa (3):
virdbus: Add virDBusHasSystemBus()
virsystemd: Don't fail to start VM if DBus isn't available or compiled
in
nwfilter: Don't fail to start if DBus isn't available
src/libvirt_private.syms | 1 +
src/nwfilter/nwfilter_driver.c | 8 ++++++--
src/util/virdbus.c | 34 +++++++++++++++++++++++++++++++---
src/util/virdbus.h | 1 +
src/util/virsystemd.c | 4 ++--
5 files changed, 41 insertions(+), 7 deletions(-)
--
1.8.3.2
[libvirt] virDomainAttachDevice error during disk hotplug
by Deepak C Shetty
Hi All,
I am trying to do a hotplug of a disk from VDSM (which uses libvirt
to get things done).
I hit the error below, "operation failed: open disk image file failed",
coming from virDomainAttachDevice.
Some background:
1) The qemu-img create cmd I use (inside vdsm) to create the file being
hotplugged is
/usr/bin/qemu-img create -f qcow2 -F qcow2 -b
/rhev/data-center/000065de-04b8-42e2-986c-2de664708be7/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399
/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399' (cwd /var/run/vdsm)
I even tried using the rel. backing path instead of abs. path as below..
/usr/bin/qemu-img create -f qcow2 -F qcow2 -b
../../../rhev/data-center/000065de-04b8-42e2-986c-2de664708be7/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399
/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399' (cwd /var/run/vdsm)
but hit the same error during the hotplug operation.
2) qemu-img info /var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399
image: /var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399
file format: qcow2
virtual size: 3.8G (4096000000 bytes)
disk size: 196K
cluster_size: 65536
backing file:
/rhev/data-center/000065de-04b8-42e2-986c-2de664708be7/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399
backing file format: qcow2
3) ls -l
/rhev/data-center/000065de-04b8-42e2-986c-2de664708be7/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399
-r--r-----. 1 vdsm kvm 197120 Aug 10 11:59
/rhev/data-center/000065de-04b8-42e2-986c-2de664708be7/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399
4) libvirtd.log snippets:
2013-08-10 11:19:41.766+0000: 1103: debug : virDomainAttachDevice:9820 :
dom=0x7f92f4003f20, (VM: name=dpk_BR_vm,
uuid=9999017d-1278-4bfb-8129-62bded257399), xml=<disk device="disk"
snapshot="no" type="file">
<source file="/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399"/>
<target bus="virtio" dev="vdb"/>
<serial>22224c45-6504-4ea1-bd24-12340017dd32</serial>
<driver cache="none" error_policy="stop" io="threads"
name="qemu" type="qcow2"/>
</disk>
2013-08-10 11:19:41.766+0000: 1103: debug :
qemuDomainObjBeginJobInternal:958 : Starting job: modify (async=none)
2013-08-10 11:19:41.766+0000: 1103: debug :
virStorageFileGetMetadata:1007 :
path=/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399 format=9 uid=107
gid=107 probe=0
2013-08-10 11:19:41.766+0000: 1103: debug :
virStorageFileGetMetadataRecurse:939 :
path=/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399 format=9 uid=107
gid=107 probe=0
2013-08-10 11:19:41.767+0000: 1103: debug :
virStorageFileGetMetadataInternal:687 :
path=/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399, fd=25, format=9
2013-08-10 11:19:41.770+0000: 1103: debug :
virStorageFileGetMetadataRecurse:939 :
path=/home/dpkshetty/libSM_disk/localstoragedomain/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399
format=9 uid=107 gid=107 probe=0
2013-08-10 11:19:41.770+0000: 1103: debug :
virStorageFileGetMetadataInternal:687 :
path=/home/dpkshetty/libSM_disk/localstoragedomain/11112d24-4cda-4200-8f6d-a1d8362c70fd/images/22224c45-6504-4ea1-bd24-12340017dd32/3333017d-1278-4bfb-8129-62bded257399,
fd=25, format=9
2013-08-10 11:19:41.770+0000: 1103: debug :
qemuDomainPCIAddressGetNextSlot:1826 : Found free PCI slot 0000:00:05
2013-08-10 11:19:41.770+0000: 1103: debug :
qemuDomainPCIAddressReserveSlot:1710 : Reserving PCI slot 0000:00:05.0
2013-08-10 11:19:41.770+0000: 1103: debug : qemuMonitorAddDrive:2756 :
mon=0x7f92f0007830
drive=file=/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399,if=none,id=drive-virtio-disk1,format=qcow2,serial=22224c45-6504-4ea1-bd24-12340017dd32,cache=none,werror=stop,rerror=stop,aio=threads
2013-08-10 11:19:41.770+0000: 1103: debug : qemuMonitorSend:887 :
QEMU_MONITOR_SEND_MSG: mon=0x7f92f0007830
msg={"execute":"human-monitor-command","arguments":{"command-line":"drive_add
dummy
file=/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399,if=none,id=drive-virtio-disk1,format=qcow2,serial=22224c45-6504-4ea1-bd24-12340017dd32,cache=none,werror=stop,rerror=stop,aio=threads"},"id":"libvirt-67"}^M
fd=-1
2013-08-10 11:19:41.771+0000: 1091: debug : qemuMonitorIOWrite:453 :
QEMU_MONITOR_IO_WRITE: mon=0x7f92f0007830
buf={"execute":"human-monitor-command","arguments":{"command-line":"drive_add
dummy
file=/var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399,if=none,id=drive-virtio-disk1,format=qcow2,serial=22224c45-6504-4ea1-bd24-12340017dd32,cache=none,werror=stop,rerror=stop,aio=threads"},"id":"libvirt-67"}^M
len=292 ret=292 errno=11
2013-08-10 11:19:41.772+0000: 1091: debug : qemuMonitorIOProcess:345 :
QEMU_MONITOR_IO_PROCESS: mon=0x7f92f0007830 buf={"return": "could not
open disk image /var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399:
Invalid argument\r\n", "id": "libvirt-67"}^M
len=134
2013-08-10 11:19:41.772+0000: 1103: error : qemuMonitorTextAddDrive:2697
: operation failed: open disk image file failed
2013-08-10 11:19:41.773+0000: 1103: debug : qemuDomainObjEndJob:1070 :
Stopping job: modify (async=none)
---------------
Any hints on what the issue could be will help. I can try out a few
things if someone provides suggestions.
thanx,
deepak
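One generic way to narrow such failures down, assuming a qemu-img new enough
to know --backing-chain and that the emulator runs as the 'qemu' user (uid
107 in the log above), is to walk the whole chain as that user and see which
link fails to open:

sudo -u qemu qemu-img info --backing-chain \
    /var/run/vdsm/3333017d-1278-4bfb-8129-62bded257399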
[libvirt] [PATCH] Consistently use 'CANCELED' spelling in public API
by Christophe Fergeau
Most constant names in the libvirt public API use the 'CANCELED'
spelling, except for VIR_DOMAIN_JOB_CANCELLED and
VIR_ERR_AUTH_CANCELLED.
This commit changes the spelling of these 2 symbols to make
it consistent with the rest of the API. For backwards compatibility,
2 #defines are introduced using the old spelling (with 2 'L's).
Signed-off-by: Christophe Fergeau <cfergeau(a)redhat.com>
---
include/libvirt/libvirt.h.in | 10 +++++++++-
include/libvirt/virterror.h | 10 +++++++++-
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 52ac95d..91efa8c 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -4031,13 +4031,21 @@ typedef enum {
VIR_DOMAIN_JOB_UNBOUNDED = 2, /* Job without a finite completion time */
VIR_DOMAIN_JOB_COMPLETED = 3, /* Job has finished, but isn't cleaned up */
VIR_DOMAIN_JOB_FAILED = 4, /* Job hit error, but isn't cleaned up */
- VIR_DOMAIN_JOB_CANCELLED = 5, /* Job was aborted, but isn't cleaned up */
+ VIR_DOMAIN_JOB_CANCELED = 5, /* Job was aborted, but isn't cleaned up */
#ifdef VIR_ENUM_SENTINELS
VIR_DOMAIN_JOB_LAST
#endif
} virDomainJobType;
+/**
+ * VIR_DOMAIN_JOB_CANCELLED:
+ *
+ * Deprecated name for VIR_DOMAIN_JOB_CANCELED. Provided for backwards
+ * compatibility.
+ */
+#define VIR_DOMAIN_JOB_CANCELLED VIR_DOMAIN_JOB_CANCELED
+
typedef struct _virDomainJobInfo virDomainJobInfo;
typedef virDomainJobInfo *virDomainJobInfoPtr;
struct _virDomainJobInfo {
diff --git a/include/libvirt/virterror.h b/include/libvirt/virterror.h
index c1960c8..b525f96 100644
--- a/include/libvirt/virterror.h
+++ b/include/libvirt/virterror.h
@@ -282,7 +282,7 @@ typedef enum {
risky domain snapshot revert */
VIR_ERR_OPERATION_ABORTED = 78, /* operation on a domain was
canceled/aborted by user */
- VIR_ERR_AUTH_CANCELLED = 79, /* authentication cancelled */
+ VIR_ERR_AUTH_CANCELED = 79, /* authentication cancelled */
VIR_ERR_NO_DOMAIN_METADATA = 80, /* The metadata is not present */
VIR_ERR_MIGRATE_UNSAFE = 81, /* Migration is not safe */
VIR_ERR_OVERFLOW = 82, /* integer overflow */
@@ -299,6 +299,14 @@ typedef enum {
} virErrorNumber;
/**
+ * VIR_ERR_AUTH_CANCELLED:
+ *
+ * Deprecated name for VIR_ERR_AUTH_CANCELED. Provided for backwards
+ * compatibility.
+ */
+#define VIR_ERR_AUTH_CANCELLED VIR_ERR_AUTH_CANCELED
+
+/**
* virErrorFunc:
* @userData: user provided data for the error callback
* @error: the error being raised.
--
1.8.3.1
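As a quick illustration of why the compatibility #defines keep existing
callers building, here is a minimal sketch against the public API (the
job-polling context is invented):

/* Code written against the old spelling still compiles, because
 * VIR_DOMAIN_JOB_CANCELLED now expands to the new enumerator. */
#include <libvirt/libvirt.h>

static int
job_was_canceled(virDomainPtr dom)
{
    virDomainJobInfo info;

    if (virDomainGetJobInfo(dom, &info) < 0)
        return -1;

    /* old and new names are the very same value */
    return info.type == VIR_DOMAIN_JOB_CANCELLED;
}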