[libvirt] virsh stuck in virDomainCreate
by Anthony Lannuzel
Hi,
I'm having an issue when trying to start a (previously created) vbox
domain: virsh gets stuck in virDomainCreate and I see no error
message (even with LIBVIRT_DEBUG=1).
Is there any way to get more information about what is happening?
Moreover, now that virsh has stalled, running "connect
vbox:///session" in another virsh instance gets an error from the "secret
driver". Is this normal, as if I'm only supposed to have one connection
at a time?
Thanks
Regards
Anthony
14 years, 9 months
[libvirt] [PATCH] macvtap: Only export symbols if support is enabled
by Matthias Bolte
---
src/Makefile.am | 2 ++
1 files changed, 2 insertions(+), 0 deletions(-)
diff --git a/src/Makefile.am b/src/Makefile.am
index 67f8b6d..4c12586 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -786,7 +786,9 @@ if WITH_LINUX
USED_SYM_FILES += libvirt_linux.syms
endif
+if WITH_MACVTAP
USED_SYM_FILES += libvirt_macvtap.syms
+endif
EXTRA_DIST += \
libvirt_public.syms \
--
1.6.3.3
14 years, 9 months
[libvirt] Heartbeat and reboot issue
by cheibi welid
Hi,
I am currently testing a two-node HA cluster (Pacemaker, Heartbeat,
DRBD). When the virtual machines (running under KVM) are on one node and I
halt or reboot that node, Heartbeat cannot migrate the resources to the
other node; the virtual machines then end up unmanaged on both nodes. I think
it's related to the Heartbeat and libvirt init order (libvirtd shouldn't stop
before Heartbeat), or perhaps some configuration is missing?
Thank you in advance.
Best regards.
Chaibi.
14 years, 9 months
[libvirt] unable to connect to an ESX host via ssh
by Dimitris Kalogeras
Hi *,
Apologies for cross posting.
I have installed libvirt and the virt tools on Ubuntu Karmic
9.10. I am trying to access and manage an ESX 3.5i host via the ssh protocol.
Although I have configured and verified that ssh access works
correctly with keys, when I try to access the ESX host via virt-manager,
it doesn't work.
Cheers,
Dimitris
--------------
Dimitrios K. Kalogeras
Electrical Engineer Ph.D.
Network Engineer
NTUA/GR-Net Network Management Center
_____________________________________
skype: aweboy
voice: +30-210-772 1863
fax: +30-210-772 1866
14 years, 9 months
[libvirt] [PATCH] qemu: Add some debugging at domain startup
by Cole Robinson
Signed-off-by: Cole Robinson <crobinso(a)redhat.com>
---
src/qemu/qemu_driver.c | 24 +++++++++++++++++++++++-
1 files changed, 23 insertions(+), 1 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f8ab545..26b5600 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2695,12 +2695,15 @@ static int qemudStartVMDaemon(virConnectPtr conn,
FD_ZERO(&keepfd);
+ DEBUG0("Beginning VM startup process");
+
if (virDomainObjIsActive(vm)) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("VM is already active"));
return -1;
}
+ DEBUG0("Generating domain security label (if required)");
/* If you are using a SecurityDriver with dynamic labelling,
then generate a security label for isolation */
if (driver->securityDriver &&
@@ -2708,17 +2711,21 @@ static int qemudStartVMDaemon(virConnectPtr conn,
driver->securityDriver->domainGenSecurityLabel(vm) < 0)
return -1;
+ DEBUG0("Setting domain security labels (if required)");
if (driver->securityDriver &&
driver->securityDriver->domainSetSecurityAllLabel &&
driver->securityDriver->domainSetSecurityAllLabel(vm) < 0)
goto cleanup;
- /* Ensure no historical cgroup for this VM is lieing around bogus settings */
+ /* Ensure no historical cgroup for this VM is lying around with bogus
+ * settings */
+ DEBUG0("Removing old cgroup (if required)");
qemuRemoveCgroup(driver, vm, 1);
if ((vm->def->ngraphics == 1) &&
vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
vm->def->graphics[0]->data.vnc.autoport) {
+ DEBUG0("Determining VNC port");
int port = qemudNextFreeVNCPort(driver);
if (port < 0) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
@@ -2735,6 +2742,7 @@ static int qemudStartVMDaemon(virConnectPtr conn,
goto cleanup;
}
+ DEBUG0("Creating domain log file");
if ((logfile = qemudLogFD(driver, vm->def->name)) < 0)
goto cleanup;
@@ -2751,14 +2759,17 @@ static int qemudStartVMDaemon(virConnectPtr conn,
goto cleanup;
}
+ DEBUG0("Determining emulator version");
if (qemudExtractVersionInfo(emulator,
NULL,
&qemuCmdFlags) < 0)
goto cleanup;
+ DEBUG0("Setting up domain cgroup (if required)");
if (qemuSetupCgroup(driver, vm) < 0)
goto cleanup;
+ DEBUG0("Preparing host devices");
if (qemuPrepareHostDevices(driver, vm->def) < 0)
goto cleanup;
@@ -2767,6 +2778,7 @@ static int qemudStartVMDaemon(virConnectPtr conn,
goto cleanup;
}
+ DEBUG0("Preparing monitor state");
if (qemuPrepareMonitorChr(driver, priv->monConfig, vm->def->name) < 0)
goto cleanup;
@@ -2798,6 +2810,7 @@ static int qemudStartVMDaemon(virConnectPtr conn,
* use in hotplug
*/
if (qemuCmdFlags & QEMUD_CMD_FLAG_DEVICE) {
+ DEBUG0("Assigning domain PCI addresses");
/* Populate cache with current addresses */
if (priv->pciaddrs) {
qemuDomainPCIAddressSetFree(priv->pciaddrs);
@@ -2816,6 +2829,7 @@ static int qemudStartVMDaemon(virConnectPtr conn,
priv->persistentAddrs = 0;
}
+ DEBUG0("Building emulator command line");
vm->def->id = driver->nextvmid++;
if (qemudBuildCommandLine(conn, driver, vm->def, priv->monConfig,
priv->monJSON, qemuCmdFlags, &argv, &progenv,
@@ -2899,25 +2913,31 @@ static int qemudStartVMDaemon(virConnectPtr conn,
if (ret == -1) /* The VM failed to start */
goto cleanup;
+ DEBUG0("Waiting for monitor to show up");
if (qemudWaitForMonitor(driver, vm, pos) < 0)
goto abort;
+ DEBUG0("Detecting VCPU PIDs");
if (qemuDetectVcpuPIDs(driver, vm) < 0)
goto abort;
+ DEBUG0("Setting CPU affinity");
if (qemudInitCpuAffinity(vm) < 0)
goto abort;
+ DEBUG0("Setting any required VM passwords");
if (qemuInitPasswords(conn, driver, vm, qemuCmdFlags) < 0)
goto abort;
/* If we have -device, then addresses are assigned explicitly.
* If not, then we have to detect dynamic ones here */
if (!(qemuCmdFlags & QEMUD_CMD_FLAG_DEVICE)) {
+ DEBUG0("Determining domain device PCI addresses");
if (qemuInitPCIAddresses(driver, vm) < 0)
goto abort;
}
+ DEBUG0("Setting initial memory amount");
qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (qemuMonitorSetBalloon(priv->mon, vm->def->memory) < 0) {
qemuDomainObjExitMonitorWithDriver(driver, vm);
@@ -2925,6 +2945,7 @@ static int qemudStartVMDaemon(virConnectPtr conn,
}
if (migrateFrom == NULL) {
+ DEBUG0("Starting domain CPUs");
/* Allow the CPUS to start executing */
if (qemuMonitorStartCPUs(priv->mon, conn) < 0) {
if (virGetLastError() == NULL)
@@ -2937,6 +2958,7 @@ static int qemudStartVMDaemon(virConnectPtr conn,
qemuDomainObjExitMonitorWithDriver(driver, vm);
+ DEBUG0("Writing domain status to disk");
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
goto abort;
--
1.6.6
14 years, 9 months
Re: [libvirt] Request to mailing list libvir-list rejected
by Eric Blake
[in general, technical lists tend to frown on the practice of top-posting]
On 03/10/2010 02:49 PM, Dev.Atom wrote:
> Sorry, I'm not used to using mailing lists
>
> I think the relevants part are these functions :
>
> int virFileOperation(const char *path, int openflags, mode_t mode,
> uid_t uid, gid_t gid,
> virFileOperationHook hook, void *hookdata,
> unsigned int flags)
> __attribute__((__warn_unused_result__));
>
> int virDirCreate(const char *path, mode_t mode, uid_t uid, gid_t gid,
> unsigned int flags)
> __attribute__((__warn_unused_result__));
Yes, my request was not to dump the entire (750k) preprocessor output to
the list, but to post just the relevant portion of the output, to make sure
there weren't any macros interfering with the parse.
>
> Matthias says that he has patches which will be ready this week, and I
> can wait for those patches
Matthias probably already hit it on the head - gnulib can guarantee that
uid_t is defined in spite of mingw not providing it, but I haven't
personally checked whether we are using that aspect of gnulib yet, so I
will defer to Matthias' patches.
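For illustration, a minimal sketch of how uid_t/gid_t could be supplied by
hand on a platform that lacks them - this is not what libvirt does (it relies
on gnulib's replacement headers), HAVE_UID_T is an assumed configure-time
macro, and virDirCreateExample is a made-up name:

#include <sys/types.h>

#if defined(_WIN32) && !defined(HAVE_UID_T)
/* assumed fallback typedefs so the prototype below has something to compile
 * against on mingw; real builds would let gnulib provide these */
typedef int uid_t;
typedef int gid_t;
#endif

/* hypothetical prototype in the same shape as the ones quoted above; it then
 * compiles even where the platform headers do not declare uid_t/gid_t */
int virDirCreateExample(const char *path, mode_t mode, uid_t uid, gid_t gid,
                        unsigned int flags)
    __attribute__((__warn_unused_result__));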
--
Eric Blake eblake(a)redhat.com +1-801-349-2682
Libvirt virtualization library http://libvirt.org
14 years, 9 months
[libvirt] [PATCH] Only use the numa functions when they are available.
by Chris Lalancette
Signed-off-by: Chris Lalancette <clalance(a)redhat.com>
---
src/nodeinfo.c | 8 +++++---
1 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/src/nodeinfo.c b/src/nodeinfo.c
index 8d7e055..bf57517 100644
--- a/src/nodeinfo.c
+++ b/src/nodeinfo.c
@@ -160,10 +160,12 @@ int linuxNodeInfoCPUPopulate(virConnectPtr conn, FILE *cpuinfo,
nodeinfo->cpus = 0;
nodeinfo->mhz = 0;
nodeinfo->cores = 1;
- if (numa_available() < 0)
- nodeinfo->nodes = 1;
- else
+
+ nodeinfo->nodes = 1;
+#if HAVE_NUMACTL
+ if (numa_available() >= 0)
nodeinfo->nodes = numa_max_node() + 1;
+#endif
/* NB: It is impossible to fill our nodes, since cpuinfo
* has no knowledge of NUMA nodes */
--
1.6.6.1
14 years, 9 months
[libvirt] [PATCH] Make nodeGetInfo report the correct number of NUMA nodes.
by Chris Lalancette
The nodeGetInfo code was always assuming that the machine had a
single NUMA node, which is not correct. The good news is that
libnuma gives us this information pretty easily, so let's
report it properly.
NOTE: With recent hardware starting to support CPU hot-add
and hot-remove, both this code and the nodeCapsInitNUMA()
code are quickly going to become obsolete. We'll have to
think of a more dynamic solution for dealing with NUMA
nodes and CPUs that can come and go at will.
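For reference, a minimal standalone sketch of the libnuma calls this patch
relies on (assumes libnuma and its headers are installed; link with -lnuma):

#include <stdio.h>
#include <numa.h>

int main(void)
{
    int nodes = 1;

    /* numa_available() returns -1 when the kernel or library has no NUMA support */
    if (numa_available() >= 0)
        nodes = numa_max_node() + 1;   /* node numbers are zero-based */

    printf("NUMA nodes: %d\n", nodes);
    return 0;
}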
Signed-off-by: Chris Lalancette <clalance(a)redhat.com>
---
src/nodeinfo.c | 6 +++++-
1 files changed, 5 insertions(+), 1 deletions(-)
diff --git a/src/nodeinfo.c b/src/nodeinfo.c
index 0748602..8d7e055 100644
--- a/src/nodeinfo.c
+++ b/src/nodeinfo.c
@@ -159,7 +159,11 @@ int linuxNodeInfoCPUPopulate(virConnectPtr conn, FILE *cpuinfo,
nodeinfo->cpus = 0;
nodeinfo->mhz = 0;
- nodeinfo->nodes = nodeinfo->cores = 1;
+ nodeinfo->cores = 1;
+ if (numa_available() < 0)
+ nodeinfo->nodes = 1;
+ else
+ nodeinfo->nodes = numa_max_node() + 1;
/* NB: It is impossible to fill our nodes, since cpuinfo
* has no knowledge of NUMA nodes */
--
1.6.6.1
14 years, 9 months
[libvirt] [PATCH] Allow a user-settable number of VNC autoports.
by Chris Lalancette
Currently we have a hard-coded maximum of 100 VNC autoports
that the qemu driver can use. This may not be enough for
running large numbers of guests on larger machines.
However, we don't necessarily want to make it an open-ended
number; that could lead to Denial of Service. Add a
user-settable option to control the number of autoports we
will assign. The default is still 100, but now users can
increase that if they wish.
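As a rough standalone sketch of the probe-and-bind approach that
qemudNextFreeVNCPort takes (the function and parameter names here are
illustrative, not libvirt's):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Return the first port in [5900, 5900 + max_ports) that we can bind,
 * or -1 if all of them are taken.  Illustrative only. */
static int next_free_vnc_port(unsigned int max_ports)
{
    unsigned int i;

    for (i = 0; i < max_ports; i++) {
        struct sockaddr_in addr;
        int reuse = 1;
        int fd = socket(PF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;

        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(5900 + i);
        addr.sin_addr.s_addr = htonl(INADDR_ANY);

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
            close(fd);   /* free right now; the caller still races to claim it */
            return 5900 + i;
        }
        close(fd);
    }
    return -1;
}

int main(void)
{
    printf("first free VNC port: %d\n", next_free_vnc_port(100));
    return 0;
}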
Signed-off-by: Chris Lalancette <clalance(a)redhat.com>
---
src/qemu/libvirtd_qemu.aug | 3 +++
src/qemu/qemu.conf | 4 ++++
src/qemu/qemu_conf.c | 6 ++++++
src/qemu/qemu_conf.h | 2 ++
src/qemu/qemu_driver.c | 5 +++--
src/qemu/test_libvirtd_qemu.aug | 14 ++++++++++++++
6 files changed, 32 insertions(+), 2 deletions(-)
diff --git a/src/qemu/libvirtd_qemu.aug b/src/qemu/libvirtd_qemu.aug
index 5bd60b3..0dd89ae 100644
--- a/src/qemu/libvirtd_qemu.aug
+++ b/src/qemu/libvirtd_qemu.aug
@@ -13,11 +13,13 @@ module Libvirtd_qemu =
let str_val = del /\"/ "\"" . store /[^\"]*/ . del /\"/ "\""
let bool_val = store /0|1/
+ let int_val = store /[0-9]+/
let str_array_element = [ seq "el" . str_val ] . del /[ \t\n]*/ ""
let str_array_val = counter "el" . array_start . ( str_array_element . ( array_sep . str_array_element ) * ) ? . array_end
let str_entry (kw:string) = [ key kw . value_sep . str_val ]
let bool_entry (kw:string) = [ key kw . value_sep . bool_val ]
+ let int_entry (kw:string) = [ key kw . value_sep . int_val ]
let str_array_entry (kw:string) = [ key kw . value_sep . str_array_val ]
@@ -29,6 +31,7 @@ module Libvirtd_qemu =
| str_entry "vnc_password"
| bool_entry "vnc_sasl"
| str_entry "vnc_sasl_dir"
+ | int_entry "vnc_max_connections"
| str_entry "security_driver"
| str_entry "user"
| str_entry "group"
diff --git a/src/qemu/qemu.conf b/src/qemu/qemu.conf
index 3da332f..edcf083 100644
--- a/src/qemu/qemu.conf
+++ b/src/qemu/qemu.conf
@@ -79,6 +79,10 @@
# vnc_sasl_dir = "/some/directory/sasl2"
+# The maximum number of VNC connections we will allow. Once
+# libvirtd has created this many qemu guests with VNC ports,
+# no more guests can be started. The default is 100 guests.
+# vnc_max_connections = 100
# The default security driver is SELinux. If SELinux is disabled
diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index 40ca221..02186fd 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -113,6 +113,8 @@ int qemudLoadDriverConfig(struct qemud_driver *driver,
return -1;
}
+ driver->vncMaxConnections = 100;
+
#ifdef HAVE_MNTENT_H
/* For privileged driver, try and find hugepage mount automatically.
* Non-privileged driver requires admin to create a dir for the
@@ -211,6 +213,10 @@ int qemudLoadDriverConfig(struct qemud_driver *driver,
}
}
+ p = virConfGetValue (conf, "vnc_max_connections");
+ CHECK_TYPE ("vnc_max_connections", VIR_CONF_LONG);
+ if (p) driver->vncMaxConnections = p->l;
+
p = virConfGetValue (conf, "user");
CHECK_TYPE ("user", VIR_CONF_STRING);
if (!(user = strdup(p && p->str ? p->str : QEMU_USER))) {
diff --git a/src/qemu/qemu_conf.h b/src/qemu/qemu_conf.h
index 6a9de5e..e0c4a9b 100644
--- a/src/qemu/qemu_conf.h
+++ b/src/qemu/qemu_conf.h
@@ -122,6 +122,8 @@ struct qemud_driver {
char *vncListen;
char *vncPassword;
char *vncSASLdir;
+ unsigned int vncMaxConnections;
+
char *hugetlbfs_mount;
char *hugepage_path;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 49983dd..8324075 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2147,10 +2147,11 @@ qemuInitPCIAddresses(struct qemud_driver *driver,
return ret;
}
-static int qemudNextFreeVNCPort(struct qemud_driver *driver ATTRIBUTE_UNUSED) {
+static int qemudNextFreeVNCPort(struct qemud_driver *driver)
+{
int i;
- for (i = 5900 ; i < 6000 ; i++) {
+ for (i = 5900 ; i < (5900 + driver->vncMaxConnections) ; i++) {
int fd;
int reuse = 1;
struct sockaddr_in addr;
diff --git a/src/qemu/test_libvirtd_qemu.aug b/src/qemu/test_libvirtd_qemu.aug
index 2feedc0..3c2190a 100644
--- a/src/qemu/test_libvirtd_qemu.aug
+++ b/src/qemu/test_libvirtd_qemu.aug
@@ -80,6 +80,13 @@ vnc_sasl = 1
#
vnc_sasl_dir = \"/some/directory/sasl2\"
+
+# The maximum number of VNC connections we will allow. Once
+# libvirtd has created this many qemu guests with VNC ports,
+# no more guests can be started. The default is 100 guests.
+#
+vnc_max_connections = 100
+
security_driver = \"selinux\"
user = \"root\"
@@ -180,6 +187,13 @@ relaxed_acs_check = 1
{ "#comment" = "" }
{ "vnc_sasl_dir" = "/some/directory/sasl2" }
{ "#empty" }
+{ "#empty" }
+{ "#comment" = "The maximum number of VNC connections we will allow. Once" }
+{ "#comment" = "libvirtd has created this many qemu guests with VNC ports," }
+{ "#comment" = "no more guests can be started. The default is 100 guests." }
+{ "#comment" = "" }
+{ "vnc_max_connections" = "100" }
+{ "#empty" }
{ "security_driver" = "selinux" }
{ "#empty" }
{ "user" = "root" }
--
1.6.6.1
14 years, 9 months