[libvirt] [RFC PATCH 1/1] gluster: cache glfs connection object per volume
by Prasanna Kumar Kalever
This patch offers:
1. Optimized (fewer) calls to glfs_init() and friends
2. A temporary reduction of the memory leak that shows up in the libvirt
process accounting, even though the root cause of the leaks is glfs_fini()
(7 - 10MB per object)
[Hopefully gluster will address this in a future release, though not a near one]
Currently, starting a VM calls glfs_new/glfs_init twice in libvirt (creating a
glfs object once to stat and read headers, and again to chown) and then forks
the qemu process, which calls them once more (for the actual read/write IO).
That is not all: if we have 4 extra attached disks, the total number of calls
to glfs_init() and friends becomes (4+1)*2 in libvirt and (4+1)*1 in qemu,
i.e. 15 calls. Since we have no control over the qemu process, which runs in a
separate process environment, let's not worry about that part here.
This patch shrinks those 10 libvirt-side calls (i.e. objects in the example
above) to just one by maintaining a cache of glfs objects.
A glfs object is shared with other disks only if the volume name and all the
volfile servers match. On a cache hit the glfs object takes a ref; the unref
happens only on close.
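In condensed form, the sharing flow from the file backend's point of view
(taken from the hunks below, error handling omitted):

/* cache hit: Find bumps the entry's ref and returns the shared object */
priv->vol = virStorageBackendGlusterFindPreopened(src);
if (!priv->vol) {
    /* first user of this volume: create and cache it (ref starts at 2) */
    priv->vol = glfs_new(src->volume);
    virStorageBackendGlusterSetPreopened(src, priv->vol);
    /* ... set volfile servers and glfs_init() as in the existing code ... */
}
/* on deinit/error: drop one ref; glfs_fini() runs only when ref reaches 0 */
virStorageBackendGlusterClosePreopened(priv->vol);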
Thanks to 'Peter Krempa' for the discussion.
Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever(a)redhat.com>
---
WORK IN PROGRESS: (WIP)
----------------
While initially caching the glfs object, i.e. in
virStorageBackendGlusterSetPreopened(), I take ref=2, so the following
virStorageBackendGlusterClosePreopened() (--ref) leaves ref=1, which keeps
the object from being cleaned up while it still has to be shared with the
disks that come next (if the conditions match).
For context, the idea is that on a time-out (or after all disks are
initialized) someone should call virStorageBackendGlusterClosePreopened()
once more, which would ideally bring ref to 0 so the cached object is
cleaned up/deleted by calling glfs_fini().
I thought of doing the time-out cleanup call in
virSecurityManagerSetAllLabel() or similar, but that looks too odd to me.
Any suggestions?
Thanks in advance.
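To make the intended end of life concrete, here is a rough sketch of the
final drain this note is asking about (the drain helper itself is hypothetical
and not part of this patch; only ClosePreopened exists below):

/* hypothetical: drop the extra ref taken at caching time for every cached
 * volume once all disks are initialized (or a time-out fires) */
static void
virStorageBackendGlusterDrainPreopened(void)
{
    size_t i;

    if (!ConnCache)
        return;

    /* walk backwards because ClosePreopened deletes an entry once ref hits 0 */
    for (i = ConnCache->nConn; i > 0; i--)
        virStorageBackendGlusterClosePreopened(ConnCache->Conn[i - 1]->fs);
}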
---
src/storage/storage_backend_gluster.c | 136 ++++++++++++++++++++++++++++++++--
src/storage/storage_backend_gluster.h | 33 ++++++++-
2 files changed, 160 insertions(+), 9 deletions(-)
diff --git a/src/storage/storage_backend_gluster.c b/src/storage/storage_backend_gluster.c
index 8e86704..4f53ebc 100644
--- a/src/storage/storage_backend_gluster.c
+++ b/src/storage/storage_backend_gluster.c
@@ -47,19 +47,132 @@ struct _virStorageBackendGlusterState {
char *dir; /* dir from URI, or "/"; always starts and ends in '/' */
};
+virGlusterDefPtr ConnCache = {0,};
+
typedef struct _virStorageBackendGlusterState virStorageBackendGlusterState;
typedef virStorageBackendGlusterState *virStorageBackendGlusterStatePtr;
+void
+virStorageBackendGlusterSetPreopened(virStorageSourcePtr src, glfs_t *fs)
+{
+ size_t i;
+ virStorageBackendGlusterStatePtrPreopened entry = NULL;
+
+ if (ConnCache == NULL && (VIR_ALLOC(ConnCache) < 0))
+ return;
+
+ for (i = 0; i < ConnCache->nConn; i++) {
+ if (STREQ(ConnCache->Conn[i]->volname, src->volume))
+ return;
+ }
+
+ if (VIR_ALLOC(entry) < 0)
+ goto L1;
+
+ if (VIR_STRDUP(entry->volname, src->volume) < 0)
+ goto L1;
+
+ entry->nhosts = src->nhosts;
+ for (i = 0; i < src->nhosts; i++) {
+ if (VIR_ALLOC_N(entry->hosts[i], strlen(src->hosts[i].name) + 1) < 0)
+ goto L2;
+ strcpy(entry->hosts[i], src->hosts[i].name);
+ }
+
+ entry->fs = fs;
+ entry->ref = 2; /* persist glfs obj per volume until a final timeout
+ virStorageBackendGlusterClosePreopened() is called */
+
+ if (VIR_INSERT_ELEMENT(ConnCache->Conn, -1, ConnCache->nConn, entry) < 0)
+ goto L2;
+
+ return;
+
+L2:
+ for (i = 0; i < entry->nhosts; i++)
+ VIR_FREE(entry->hosts[i]);
+L1:
+ if(ConnCache->nConn == 0)
+ VIR_FREE(ConnCache);
+ VIR_FREE(entry->volname);
+ VIR_FREE(entry);
+}
+
+glfs_t *
+virStorageBackendGlusterFindPreopened(virStorageSourcePtr src)
+{
+ size_t i, j, k, ret = 0;
+ size_t min, max;
+
+ if (ConnCache == NULL)
+ return NULL;
+
+ virStorageBackendGlusterStatePtrPreopened entry;
+
+ for (i = 0; i < ConnCache->nConn; i++) {
+ entry = ConnCache->Conn[i];
+ if (STREQ(entry->volname, src->volume)) {
+ min = entry->nhosts < src->nhosts ? entry->nhosts : src->nhosts;
+ max = entry->nhosts >= src->nhosts ? entry->nhosts : src->nhosts;
+ for (j = 0; j< min; j++) {
+ if (entry->nhosts == min) {
+ for (k = 0; k < max; k++) {
+ if (STREQ(entry->hosts[j], src->hosts[k].name)) {
+ ret = 1;
+ break;
+ }
+ }
+ if (!ret)
+ return NULL;
+ } else {
+ for (k = 0; k < max; k++) {
+ if (STREQ(src->hosts[j].name, entry->hosts[k])) {
+ ret = 1;
+ break;
+ }
+ }
+ if (!ret)
+ return NULL;
+ }
+ }
+ entry->ref++;
+ return entry->fs;
+ }
+ }
+ return NULL;
+}
+
+int
+virStorageBackendGlusterClosePreopened(glfs_t *fs)
+{
+ size_t i;
+ int ret = 0;
+
+ if (fs == NULL)
+ return ret;
+
+ for (i = 0; i < ConnCache->nConn; i++) {
+ if (ConnCache->Conn[i]->fs == fs) {
+ if (--ConnCache->Conn[i]->ref)
+ return ret;
+
+ ret = glfs_fini(ConnCache->Conn[i]->fs);
+ VIR_FREE(ConnCache->Conn[i]->volname);
+ VIR_FREE(ConnCache->Conn[i]);
+
+ VIR_DELETE_ELEMENT(ConnCache->Conn, i, ConnCache->nConn);
+ }
+ }
+ return ret;
+}
+
static void
virStorageBackendGlusterClose(virStorageBackendGlusterStatePtr state)
{
if (!state)
return;
- /* Yuck - glusterfs-api-3.4.1 appears to always return -1 for
- * glfs_fini, with errno containing random data, so there's no way
- * to tell if it succeeded. 3.4.2 is supposed to fix this.*/
- if (state->vol && glfs_fini(state->vol) < 0)
+ if (state->vol && virStorageBackendGlusterClosePreopened(state->vol) < 0)
VIR_DEBUG("shutdown of gluster volume %s failed with errno %d",
state->volname, errno);
@@ -556,8 +669,7 @@ virStorageFileBackendGlusterDeinit(virStorageSourcePtr src)
src, src->hosts->name, src->hosts->port ? src->hosts->port : "0",
src->volume, src->path);
- if (priv->vol)
- glfs_fini(priv->vol);
+ virStorageBackendGlusterClosePreopened(priv->vol);
VIR_FREE(priv->canonpath);
VIR_FREE(priv);
@@ -630,11 +742,20 @@ virStorageFileBackendGlusterInit(virStorageSourcePtr src)
src, priv, src->volume, src->path,
(unsigned int)src->drv->uid, (unsigned int)src->drv->gid);
+
+ priv->vol = virStorageBackendGlusterFindPreopened(src);
+ if (priv->vol) {
+ src->drv->priv = priv;
+ return 0;
+ }
+
if (!(priv->vol = glfs_new(src->volume))) {
virReportOOMError();
goto error;
}
+ virStorageBackendGlusterSetPreopened(src, priv->vol);
+
for (i = 0; i < src->nhosts; i++) {
if (virStorageFileBackendGlusterInitServer(priv, src->hosts + i) < 0)
goto error;
@@ -652,8 +773,7 @@ virStorageFileBackendGlusterInit(virStorageSourcePtr src)
return 0;
error:
- if (priv->vol)
- glfs_fini(priv->vol);
+ virStorageBackendGlusterClosePreopened(priv->vol);
VIR_FREE(priv);
return -1;
diff --git a/src/storage/storage_backend_gluster.h b/src/storage/storage_backend_gluster.h
index 6796016..a0326aa 100644
--- a/src/storage/storage_backend_gluster.h
+++ b/src/storage/storage_backend_gluster.h
@@ -22,9 +22,40 @@
#ifndef __VIR_STORAGE_BACKEND_GLUSTER_H__
# define __VIR_STORAGE_BACKEND_GLUSTER_H__
-# include "storage_backend.h"
+#include "storage_backend.h"
+#include <glusterfs/api/glfs.h>
extern virStorageBackend virStorageBackendGluster;
extern virStorageFileBackend virStorageFileBackendGluster;
+
+struct _virStorageBackendGlusterStatePreopened {
+ char *volname;
+ size_t nhosts;
+ char *hosts[1024]; /* FIXME: 1024 ? */
+ glfs_t *fs;
+ int ref;
+};
+
+typedef struct _virStorageBackendGlusterStatePreopened virStorageBackendGlusterStatePreopened;
+typedef virStorageBackendGlusterStatePreopened *virStorageBackendGlusterStatePtrPreopened;
+
+struct _virGlusterDef {
+ size_t nConn;
+ virStorageBackendGlusterStatePtrPreopened *Conn;
+};
+
+typedef struct _virGlusterDef virGlusterDef;
+typedef virGlusterDef *virGlusterDefPtr;
+
+extern virGlusterDefPtr ConnCache;
+
+void
+virStorageBackendGlusterSetPreopened(virStorageSourcePtr src, glfs_t *fs);
+
+glfs_t*
+virStorageBackendGlusterFindPreopened(virStorageSourcePtr src);
+
+int
+virStorageBackendGlusterClosePreopened(glfs_t *fs);
#endif /* __VIR_STORAGE_BACKEND_GLUSTER_H__ */
--
2.7.4
[libvirt] [PATCH 0/2] qemu: error out on USB ports out of range
by Ján Tomko
https://bugzilla.redhat.com/show_bug.cgi?id=1399260
Ján Tomko (2):
tests: Fix USB ports in usb-redir-filter
qemu: error out on USB ports out of range
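The cover letter carries only the diffstat; a rough sketch of the kind of
range check patch 2/2 adds in src/conf/domain_addr.c (the surrounding function
and the variable names are assumptions of this sketch, not the actual code):

/* illustrative only: reject a user-specified port the hub/controller does
 * not have instead of silently accepting it */
if (port == 0 || port > hub->nports) {
    virReportError(VIR_ERR_XML_ERROR,
                   _("USB port %u out of range (the hub has %zu ports)"),
                   port, hub->nports);
    return -1;
}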
src/conf/domain_addr.c | 7 ++++++
.../qemuxml2argv-usb-ports-out-of-range.xml | 25 ++++++++++++++++++++++
.../qemuxml2argv-usb-redir-filter-version.args | 4 ++--
.../qemuxml2argv-usb-redir-filter-version.xml | 4 ++--
tests/qemuxml2argvtest.c | 3 +++
.../qemuxml2xmlout-usb-redir-filter-version.xml | 4 ++--
6 files changed, 41 insertions(+), 6 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-usb-ports-out-of-range.xml
--
2.7.3
[libvirt] [PATCH] qemuProcessReconnect: Avoid relabeling images after migration
by Jiri Denemark
Restarting libvirtd on the source host at the end of migration when a
domain is already running on the destination would cause image labels to
be reset effectively killing the domain. Commit e8d0166e1d fixed similar
issue on the destination host, but kept the source always resetting the
labels, which was mostly correct except for the specific case handled by
this patch.
https://bugzilla.redhat.com/show_bug.cgi?id=1343858
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
---
src/qemu/qemu_process.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index f8f379a58..01c9259b6 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2955,7 +2955,8 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
virConnectPtr conn,
qemuMigrationJobPhase phase,
virDomainState state,
- int reason)
+ int reason,
+ unsigned int *stopFlags)
{
bool postcopy = state == VIR_DOMAIN_PAUSED &&
(reason == VIR_DOMAIN_PAUSED_POSTCOPY ||
@@ -3019,6 +3020,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
case QEMU_MIGRATION_PHASE_CONFIRM3:
/* migration completed, we need to kill the domain here */
+ *stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
return -1;
}
@@ -3044,7 +3046,8 @@ static int
qemuProcessRecoverJob(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virConnectPtr conn,
- const struct qemuDomainJobObj *job)
+ const struct qemuDomainJobObj *job,
+ unsigned int *stopFlags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainState state;
@@ -3055,7 +3058,7 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
switch (job->asyncJob) {
case QEMU_ASYNC_JOB_MIGRATION_OUT:
if (qemuProcessRecoverMigrationOut(driver, vm, conn, job->phase,
- state, reason) < 0)
+ state, reason, stopFlags) < 0)
return -1;
break;
@@ -3402,7 +3405,7 @@ qemuProcessReconnect(void *opaque)
if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
goto error;
- if (qemuProcessRecoverJob(driver, obj, conn, &oldjob) < 0)
+ if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
goto error;
if (qemuProcessUpdateDevices(driver, obj) < 0)
--
2.11.0.rc2
[libvirt] [PATCH] qemu: Report tunnelled post-copy migration as unsupported
by Jiri Denemark
Post-copy migration needs bi-directional communication between the
source and the destination QEMU processes, which is not supported by
tunnelled migration.
https://bugzilla.redhat.com/show_bug.cgi?id=1371358
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
---
src/qemu/qemu_migration.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index d4a55d8f7..26b2e6d25 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3167,6 +3167,12 @@ qemuMigrationBeginPhase(virQEMUDriverPtr driver,
goto cleanup;
}
+ if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
+ virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("post-copy is not supported with tunnelled migration"));
+ goto cleanup;
+ }
+
if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) {
bool has_drive_mirror = virQEMUCapsGet(priv->qemuCaps,
QEMU_CAPS_DRIVE_MIRROR);
@@ -3645,6 +3651,12 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
goto cleanup;
}
+ if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
+ virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+ _("post-copy is not supported with tunnelled migration"));
+ goto cleanup;
+ }
+
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
--
2.11.0.rc2
[libvirt] [PATCH 0/2] Fix case with non-root domain and hugepages
by Michal Privoznik
Yet another bug found due to my work on containerizing qemu.
Michal Privoznik (2):
qemu: Create hugepage path on per domain basis
security: Implement virSecurityManagerSetHugepages
src/qemu/qemu_command.c | 4 +-
src/qemu/qemu_conf.c | 45 ++++++++++++++++------
src/qemu/qemu_conf.h | 16 +++++---
src/qemu/qemu_driver.c | 19 +++------
src/qemu/qemu_process.c | 25 +++++++++++-
src/security/security_dac.c | 11 ++++++
src/security/security_selinux.c | 10 +++++
.../qemuxml2argv-hugepages-numa.args | 4 +-
.../qemuxml2argv-hugepages-pages.args | 14 +++----
.../qemuxml2argv-hugepages-pages2.args | 2 +-
.../qemuxml2argv-hugepages-pages3.args | 2 +-
.../qemuxml2argv-hugepages-pages5.args | 2 +-
.../qemuxml2argv-hugepages-shared.args | 12 +++---
tests/qemuxml2argvdata/qemuxml2argv-hugepages.args | 2 +-
.../qemuxml2argv-memory-hotplug-dimm-addr.args | 4 +-
.../qemuxml2argv-memory-hotplug-dimm.args | 4 +-
16 files changed, 118 insertions(+), 58 deletions(-)
--
2.8.4
[libvirt] [PATCH v2 00/31] qemu: Add support for unavailable-features
by Jiri Denemark
QEMU 2.8.0 adds support for unavailable-features in
query-cpu-definitions reply. The unavailable-features array lists CPU
features which prevent a corresponding CPU model from being usable on
current host. It can only be used when all the unavailable features are
disabled. Empty array means the CPU model can be used without
modifications.
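For illustration, an abridged reply shape (model and feature names are only
examples; other fields are omitted). Here "Haswell" would be usable only with
hle and rtm disabled, while "qemu64" is usable as-is:

{"execute": "query-cpu-definitions"}
{"return": [
    {"name": "Haswell", "unavailable-features": ["hle", "rtm"], ...},
    {"name": "qemu64",  "unavailable-features": [], ...}
]}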
Changes in v2:
- many; the way we probe QEMU was rewritten so that we probe both KVM
and TCG capabilities and store them separately whenever we expect them
to be different
Big thanks to Andrea Bolognani for providing the updated replies data
from aarch64 and ppc64le machines.
Jiri Denemark (31):
qemu: Make QMP probing process reusable
qemu: Use -machine when probing capabilities via QMP
qemu: Probe KVM state earlier
qemucapsprobe: Ignore all greetings except the first one
qemu: Enable KVM when probing capabilities
qemu: Discard caps cache when KVM availability changes
qemu: Use saner defaults for domain capabilities
qemu: Don't return unusable virttype in domain capabilities
qemu: Refactor virQEMUCapsCacheLookup
qemu: Refresh caps in virQEMUCapsCacheLookupByArch
qemu: Introduce virQEMUCapsLoadCPUModels
qemu: Introduce virQEMUCapsFormatCPUModels
qemu: Probe CPU models for KVM and TCG
tests: Update capabilities for QEMU 1.2.2
tests: Update capabilities for QEMU 1.3.1
tests: Update capabilities for QEMU 1.4.2
tests: Update capabilities for QEMU 1.5.3
tests: Update capabilities for QEMU 1.6.0
tests: Update capabilities for QEMU 1.7.0
tests: Update capabilities for QEMU 2.1.1
tests: Update capabilities for QEMU 2.4.0
tests: Update capabilities for QEMU 2.5.0
tests: Update capabilities for QEMU 2.6.0
tests: Update capabilities for QEMU 2.6.0 (aarch64, GICv2)
qemu: Avoid reporting "host" as a supported CPU model
tests: Update capabilities for QEMU 2.6.0 (aarch64, GICv3)
tests: Update capabilities for QEMU 2.6.0 (ppc64le)
tests: Update capabilities for QEMU 2.7.0
tests: Add QEMU 2.8.0 capabilities data
tests: Add QEMU 2.8.0 domain capabilities tests
qemu: Add support for unavailable-features
src/conf/domain_capabilities.c | 6 +-
src/conf/domain_capabilities.h | 3 +-
src/qemu/qemu_capabilities.c | 684 +-
src/qemu/qemu_capabilities.h | 15 +-
src/qemu/qemu_capspriv.h | 8 +
src/qemu/qemu_driver.c | 27 +-
src/qemu/qemu_monitor.h | 1 +
src/qemu/qemu_monitor_json.c | 18 +
src/qemu/qemu_process.c | 3 +-
tests/domaincapsschemadata/qemu_2.6.0.ppc64le.xml | 1 +
.../domaincapsschemadata/qemu_2.8.0-tcg.x86_64.xml | 116 +
tests/domaincapsschemadata/qemu_2.8.0.x86_64.xml | 116 +
tests/domaincapstest.c | 8 +
.../qemucapabilitiesdata/caps_1.2.2.x86_64.replies | 216 +-
tests/qemucapabilitiesdata/caps_1.2.2.x86_64.xml | 68 +-
.../qemucapabilitiesdata/caps_1.3.1.x86_64.replies | 172 +-
tests/qemucapabilitiesdata/caps_1.3.1.x86_64.xml | 74 +-
.../qemucapabilitiesdata/caps_1.4.2.x86_64.replies | 170 +-
tests/qemucapabilitiesdata/caps_1.4.2.x86_64.xml | 74 +-
.../qemucapabilitiesdata/caps_1.5.3.x86_64.replies | 170 +-
tests/qemucapabilitiesdata/caps_1.5.3.x86_64.xml | 74 +-
.../qemucapabilitiesdata/caps_1.6.0.x86_64.replies | 170 +-
tests/qemucapabilitiesdata/caps_1.6.0.x86_64.xml | 74 +-
.../qemucapabilitiesdata/caps_1.7.0.x86_64.replies | 170 +-
tests/qemucapabilitiesdata/caps_1.7.0.x86_64.xml | 74 +-
.../qemucapabilitiesdata/caps_2.1.1.x86_64.replies | 173 +-
tests/qemucapabilitiesdata/caps_2.1.1.x86_64.xml | 77 +-
.../qemucapabilitiesdata/caps_2.4.0.x86_64.replies | 182 +-
tests/qemucapabilitiesdata/caps_2.4.0.x86_64.xml | 86 +-
.../qemucapabilitiesdata/caps_2.5.0.x86_64.replies | 182 +-
tests/qemucapabilitiesdata/caps_2.5.0.x86_64.xml | 86 +-
.../caps_2.6.0-gicv2.aarch64.replies | 194 +-
.../caps_2.6.0-gicv2.aarch64.xml | 93 +-
.../caps_2.6.0-gicv3.aarch64.replies | 207 +-
.../caps_2.6.0-gicv3.aarch64.xml | 95 +-
.../caps_2.6.0.ppc64le.replies | 1406 +-
tests/qemucapabilitiesdata/caps_2.6.0.ppc64le.xml | 1296 +-
.../qemucapabilitiesdata/caps_2.6.0.x86_64.replies | 182 +-
tests/qemucapabilitiesdata/caps_2.6.0.x86_64.xml | 86 +-
.../qemucapabilitiesdata/caps_2.7.0.x86_64.replies | 185 +-
tests/qemucapabilitiesdata/caps_2.7.0.x86_64.xml | 89 +-
.../qemucapabilitiesdata/caps_2.8.0.x86_64.replies | 14246 +++++++++++++++++++
tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml | 293 +
tests/qemucapabilitiestest.c | 6 +
tests/qemucapsprobemock.c | 9 +-
tests/qemumonitorjsontest.c | 27 +-
tests/qemuxml2argvtest.c | 39 +-
47 files changed, 20114 insertions(+), 1637 deletions(-)
create mode 100644 tests/domaincapsschemadata/qemu_2.8.0-tcg.x86_64.xml
create mode 100644 tests/domaincapsschemadata/qemu_2.8.0.x86_64.xml
create mode 100644 tests/qemucapabilitiesdata/caps_2.8.0.x86_64.replies
create mode 100644 tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml
--
2.10.2
[libvirt] Wiki account
by Eric Farman
I believe this is still the preferred method... Could I please have an
account "farman" for the libvirt wiki, in order to contribute some of
the cover-letter "howto" information from the recent vhost-scsi series?
Thanks,
- Eric
[libvirt] [PATCH v4] storage_backend_rbd: check the return value of rados_conf_set
by Chen Hanxiao
From: Chen Hanxiao <chenhanxiao(a)gmail.com>
We had a lot of rados_conf_set() calls and return value checks.
Use the helper virStorageBackendRBDRADOSConfSet for them.
Signed-off-by: Chen Hanxiao <chenhanxiao(a)gmail.com>
---
v4: introduce helper virStorageBackendRBDRADOSConfSet
v3: fix a copy-paste error
v2: add another missing return value check
src/storage/storage_backend_rbd.c | 72 +++++++++++++++++++++++----------------
1 file changed, 42 insertions(+), 30 deletions(-)
diff --git a/src/storage/storage_backend_rbd.c b/src/storage/storage_backend_rbd.c
index 718c4d6..b1c51ab 100644
--- a/src/storage/storage_backend_rbd.c
+++ b/src/storage/storage_backend_rbd.c
@@ -52,6 +52,23 @@ typedef struct _virStorageBackendRBDState virStorageBackendRBDState;
typedef virStorageBackendRBDState *virStorageBackendRBDStatePtr;
static int
+virStorageBackendRBDRADOSConfSet(rados_t cluster,
+ const char *option,
+ const char *value)
+{
+ VIR_DEBUG("Setting RADOS option '%s' to '%s'",
+ option, value);
+ if (rados_conf_set(cluster, option, value) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("failed to set RADOS option: %s"),
+ option);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
virConnectPtr conn,
virStoragePoolSourcePtr source)
@@ -93,20 +110,13 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
if (!(rados_key = virStringEncodeBase64(secret_value, secret_value_size)))
goto cleanup;
- VIR_DEBUG("Found cephx key: %s", rados_key);
- if (rados_conf_set(ptr->cluster, "key", rados_key) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("failed to set RADOS option: %s"),
- "rados_key");
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "key", rados_key) < 0)
goto cleanup;
- }
- if (rados_conf_set(ptr->cluster, "auth_supported", "cephx") < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("failed to set RADOS option: %s"),
- "auth_supported");
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "auth_supported", "cephx") < 0)
goto cleanup;
- }
} else {
VIR_DEBUG("Not using cephx authorization");
if (rados_create(&ptr->cluster, NULL) < 0) {
@@ -114,12 +124,9 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
_("failed to create the RADOS cluster"));
goto cleanup;
}
- if (rados_conf_set(ptr->cluster, "auth_supported", "none") < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("failed to set RADOS option: %s"),
- "auth_supported");
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "auth_supported", "none") < 0)
goto cleanup;
- }
}
VIR_DEBUG("Found %zu RADOS cluster monitors in the pool configuration",
@@ -145,35 +152,40 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
goto cleanup;
mon_buff = virBufferContentAndReset(&mon_host);
- VIR_DEBUG("RADOS mon_host has been set to: %s", mon_buff);
- if (rados_conf_set(ptr->cluster, "mon_host", mon_buff) < 0) {
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("failed to set RADOS option: %s"),
- "mon_host");
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "mon_host",
+ mon_buff) < 0)
goto cleanup;
- }
/*
* Set timeout options for librados.
* In case the Ceph cluster is down libvirt won't block forever.
* Operations in librados will return -ETIMEDOUT when the timeout is reached.
*/
- VIR_DEBUG("Setting RADOS option client_mount_timeout to %s", client_mount_timeout);
- rados_conf_set(ptr->cluster, "client_mount_timeout", client_mount_timeout);
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "client_mount_timeout",
+ client_mount_timeout) < 0)
+ goto cleanup;
- VIR_DEBUG("Setting RADOS option rados_mon_op_timeout to %s", mon_op_timeout);
- rados_conf_set(ptr->cluster, "rados_mon_op_timeout", mon_op_timeout);
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "rados_mon_op_timeout",
+ mon_op_timeout) < 0)
+ goto cleanup;
- VIR_DEBUG("Setting RADOS option rados_osd_op_timeout to %s", osd_op_timeout);
- rados_conf_set(ptr->cluster, "rados_osd_op_timeout", osd_op_timeout);
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "rados_osd_op_timeout",
+ osd_op_timeout) < 0)
+ goto cleanup;
/*
* Librbd supports creating RBD format 2 images. We no longer have to invoke
* rbd_create3(), we can tell librbd to default to format 2.
* This leaves us to simply use rbd_create() and use the default behavior of librbd
*/
- VIR_DEBUG("Setting RADOS option rbd_default_format to %s", rbd_default_format);
- rados_conf_set(ptr->cluster, "rbd_default_format", rbd_default_format);
+ if (virStorageBackendRBDRADOSConfSet(ptr->cluster,
+ "rbd_default_format",
+ rbd_default_format) < 0)
+ goto cleanup;
ptr->starttime = time(0);
if ((r = rados_connect(ptr->cluster)) < 0) {
--
2.7.4
[libvirt] [PATCH] qemu: capabilities: Don't partially reprobe caps on process reconnect
by Peter Krempa
Thanks to the complex capability caching code virQEMUCapsProbeQMP was
never called when we were starting a new qemu VM. On the other hand,
when we are reconnecting to the qemu process we reload the capability
list from the status XML file. This means that the flag preventing the
function from being called was not set and thus we partially reprobed some of
the capabilities.
The recent addition of CPU hotplug clears the
QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS if the machine does not support it.
The partial re-probe on reconnect results into attempting to call the
unsupported command and then killing the VM.
Remove the partial reprobe and depend on the stored capabilities. If it
will be necessary to reprobe the capabilities in the future, we should
do a full reprobe rather than this partial one.
---
src/qemu/qemu_capabilities.c | 17 -----------------
src/qemu/qemu_capabilities.h | 3 ---
src/qemu/qemu_process.c | 4 ----
3 files changed, 24 deletions(-)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 8901e7b..37e5302 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -2937,23 +2937,6 @@ virQEMUCapsProbeQMPGICCapabilities(virQEMUCapsPtr qemuCaps,
return 0;
}
-int virQEMUCapsProbeQMP(virQEMUCapsPtr qemuCaps,
- qemuMonitorPtr mon)
-{
- VIR_DEBUG("qemuCaps=%p mon=%p", qemuCaps, mon);
-
- if (qemuCaps->usedQMP)
- return 0;
-
- if (virQEMUCapsProbeQMPCommands(qemuCaps, mon) < 0)
- return -1;
-
- if (virQEMUCapsProbeQMPEvents(qemuCaps, mon) < 0)
- return -1;
-
- return 0;
-}
-
static bool
virQEMUCapsCPUFilterFeatures(const char *name,
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index 5255815..be71507 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -403,9 +403,6 @@ virQEMUCapsPtr virQEMUCapsNew(void);
int virQEMUCapsInitQMPMonitor(virQEMUCapsPtr qemuCaps,
qemuMonitorPtr mon);
-int virQEMUCapsProbeQMP(virQEMUCapsPtr qemuCaps,
- qemuMonitorPtr mon);
-
void virQEMUCapsSet(virQEMUCapsPtr qemuCaps,
virQEMUCapsFlags flag) ATTRIBUTE_NONNULL(1);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index ab0c2c8..90f1101 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1723,10 +1723,6 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
if (qemuMonitorSetCapabilities(priv->mon) < 0)
goto cleanup;
- if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON) &&
- virQEMUCapsProbeQMP(priv->qemuCaps, priv->mon) < 0)
- goto cleanup;
-
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT) &&
qemuMonitorSetMigrationCapability(priv->mon,
QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
--
2.10.2
[libvirt] [PATCH V2] qemu: Redefine the "unlimited" memory limits one more time
by Viktor Mihajlovski
With kernel 3.18 (since commit 3e32cb2e0a12b6915056ff04601cf1bb9b44f967) the
"unlimited" value for cgroup memory limits has changed once again as its byte
value is now computed from a page counter.
The new "unlimited" value reported by the cgroup fs is therefore 2**51-1 pages
which is (VIR_DOMAIN_MEMORY_PARAM_UNLIMITED - 3072). This results e.g. in virsh
memtune displaying 9007199254740988 instead of unlimited for the limits.
This patch deals with the rounding issue by scaling the byte values reported
by the kernel and the PARAM_UNLIMITED value to page size and comparing those.
See also libvirt commit 231656bbeb9e4d3bedc44362784c35eee21cf0f4 for the
history for kernel 3.12 and before.
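Worked out with 4 KiB pages (and taking VIR_DOMAIN_MEMORY_PARAM_UNLIMITED as
2^53 - 1 KiB):

kernel "unlimited" = (2^51 - 1) pages = 2^63 - 4096 bytes  (PARAM_UNLIMITED * 1024 - 3072)
in KiB (>> 10)     = 2^53 - 4         = 9007199254740988
PARAM_UNLIMITED    = 2^53 - 1         = 9007199254740991

old check: 9007199254740988 > 9007199254740991                 -> false, shown as a finite limit
new check: (9007199254740988 >> 2) >= (9007199254740991 >> 2)
            2251799813685247 >= 2251799813685247               -> true, reported as unlimited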
Signed-off-by: Viktor Mihajlovski <mihajlov(a)linux.vnet.ibm.com>
---
V2: Shifting the scaled kb values by 2 is sufficient to account for
4K pages. Friday night fallout, sorry for that.
src/util/vircgroup.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/util/vircgroup.c b/src/util/vircgroup.c
index 24917e7..39c7de2 100644
--- a/src/util/vircgroup.c
+++ b/src/util/vircgroup.c
@@ -2542,7 +2542,7 @@ virCgroupGetMemoryHardLimit(virCgroupPtr group, unsigned long long *kb)
goto cleanup;
*kb = limit_in_bytes >> 10;
- if (*kb > VIR_DOMAIN_MEMORY_PARAM_UNLIMITED)
+ if (*kb >> 2 >= VIR_DOMAIN_MEMORY_PARAM_UNLIMITED >> 2)
*kb = VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
ret = 0;
@@ -2604,7 +2604,7 @@ virCgroupGetMemorySoftLimit(virCgroupPtr group, unsigned long long *kb)
goto cleanup;
*kb = limit_in_bytes >> 10;
- if (*kb > VIR_DOMAIN_MEMORY_PARAM_UNLIMITED)
+ if (*kb >> 2 >= VIR_DOMAIN_MEMORY_PARAM_UNLIMITED >> 2)
*kb = VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
ret = 0;
@@ -2666,7 +2666,7 @@ virCgroupGetMemSwapHardLimit(virCgroupPtr group, unsigned long long *kb)
goto cleanup;
*kb = limit_in_bytes >> 10;
- if (*kb > VIR_DOMAIN_MEMORY_PARAM_UNLIMITED)
+ if (*kb >> 2 >= VIR_DOMAIN_MEMORY_PARAM_UNLIMITED >> 2)
*kb = VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
ret = 0;
--
1.9.1