[libvirt] [PATCH v2] locking: Add io_timeout to sanlock
by Michal Privoznik
https://bugzilla.redhat.com/show_bug.cgi?id=1251190
If a domain loses access to its storage, sanlock tries to kill it
after a timeout. So far the default has been 80 seconds, but for
some scenarios this might not be enough. Allow users to adjust the
timeout according to their needs.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
---
Changes in v2:
- Check whether the new sanlock API is available. If it is not, forbid setting the timeout in
the config file.
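For illustration, a minimal sketch of how the new knob would be used, assuming the QEMU driver's sanlock plugin config at /etc/libvirt/qemu-sanlock.conf; the other keys shown already exist in sanlock.conf, and the values are hypothetical:

auto_disk_leases = 1
disk_lease_dir = "/var/lib/libvirt/sanlock"
host_id = 1
# per-lockspace I/O timeout in seconds; 0 keeps sanlock's built-in default
io_timeout = 15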
m4/virt-sanlock.m4 | 7 +++++++
src/locking/libvirt_sanlock.aug | 1 +
src/locking/lock_driver_sanlock.c | 15 +++++++++++++++
src/locking/sanlock.conf | 7 +++++++
src/locking/test_libvirt_sanlock.aug.in | 1 +
5 files changed, 31 insertions(+)
diff --git a/m4/virt-sanlock.m4 b/m4/virt-sanlock.m4
index c7c0186..d2a607d 100644
--- a/m4/virt-sanlock.m4
+++ b/m4/virt-sanlock.m4
@@ -46,6 +46,13 @@ AC_DEFUN([LIBVIRT_CHECK_SANLOCK],[
[whether sanlock supports sanlock_inq_lockspace])
fi
+ AC_CHECK_LIB([sanlock_client], [sanlock_add_lockspace_timeout],
+ [sanlock_add_lockspace_timeout=yes], [sanlock_add_lockspace_timeout=no])
+ if test "x$sanlock_add_lockspace_timeout" = "xyes" ; then
+ AC_DEFINE_UNQUOTED([HAVE_SANLOCK_ADD_LOCKSPACE_TIMEOUT], 1,
+ [whether Sanlock supports sanlock_add_lockspace_timeout])
+ fi
+
CPPFLAGS="$old_cppflags"
LIBS="$old_libs"
fi
diff --git a/src/locking/libvirt_sanlock.aug b/src/locking/libvirt_sanlock.aug
index a78a444..8843590 100644
--- a/src/locking/libvirt_sanlock.aug
+++ b/src/locking/libvirt_sanlock.aug
@@ -22,6 +22,7 @@ module Libvirt_sanlock =
| int_entry "host_id"
| bool_entry "require_lease_for_disks"
| bool_entry "ignore_readonly_and_shared_disks"
+ | int_entry "io_timeout"
| str_entry "user"
| str_entry "group"
let comment = [ label "#comment" . del /#[ \t]*/ "# " . store /([^ \t\n][^\n]*)?/ . del /\n/ "\n" ]
diff --git a/src/locking/lock_driver_sanlock.c b/src/locking/lock_driver_sanlock.c
index e052875..dbda915 100644
--- a/src/locking/lock_driver_sanlock.c
+++ b/src/locking/lock_driver_sanlock.c
@@ -73,6 +73,7 @@ struct _virLockManagerSanlockDriver {
int hostID;
bool autoDiskLease;
char *autoDiskLeasePath;
+ unsigned int io_timeout;
/* under which permissions does sanlock run */
uid_t user;
@@ -151,6 +152,10 @@ static int virLockManagerSanlockLoadConfig(const char *configFile)
else
driver->requireLeaseForDisks = !driver->autoDiskLease;
+ p = virConfGetValue(conf, "io_timeout");
+ CHECK_TYPE("io_timeout", VIR_CONF_ULONG);
+ if (p) driver->io_timeout = p->l;
+
p = virConfGetValue(conf, "user");
CHECK_TYPE("user", VIR_CONF_STRING);
if (p) {
@@ -338,7 +343,16 @@ static int virLockManagerSanlockSetupLockspace(void)
* or we can fallback to polling.
*/
retry:
+#ifdef HAVE_SANLOCK_ADD_LOCKSPACE_TIMEOUT
+ if ((rv = sanlock_add_lockspace_timeout(&ls, 0, driver->io_timeout)) < 0) {
+#else
+ if (driver->io_timeout) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("unable to use io_timeout with this version of sanlock"));
+ goto error;
+ }
if ((rv = sanlock_add_lockspace(&ls, 0)) < 0) {
+#endif
if (-rv == EINPROGRESS && --retries) {
#ifdef HAVE_SANLOCK_INQ_LOCKSPACE
/* we have this function which blocks until lockspace change the
@@ -404,6 +418,7 @@ static int virLockManagerSanlockInit(unsigned int version,
driver->requireLeaseForDisks = true;
driver->hostID = 0;
driver->autoDiskLease = false;
+ driver->io_timeout = 0;
driver->user = (uid_t) -1;
driver->group = (gid_t) -1;
if (VIR_STRDUP(driver->autoDiskLeasePath, LOCALSTATEDIR "/lib/libvirt/sanlock") < 0) {
diff --git a/src/locking/sanlock.conf b/src/locking/sanlock.conf
index e5566ef..3a1a51c 100644
--- a/src/locking/sanlock.conf
+++ b/src/locking/sanlock.conf
@@ -54,6 +54,13 @@
#require_lease_for_disks = 1
#
+# Sanlock is able to kill qemu processes on IO timeout. By its internal
+# implementation, the current default is 80 seconds. If you need to adjust
+# the value change the following variable. Value of zero means use the
+# default sanlock timeout.
+#io_timeout = 0
+
+#
# The combination of user and group under which the sanlock
# daemon runs. Libvirt will chown created files (like
# content of disk_lease_dir) to make sure sanlock daemon can
diff --git a/src/locking/test_libvirt_sanlock.aug.in b/src/locking/test_libvirt_sanlock.aug.in
index ef98ea6..7f66f81 100644
--- a/src/locking/test_libvirt_sanlock.aug.in
+++ b/src/locking/test_libvirt_sanlock.aug.in
@@ -6,5 +6,6 @@ module Test_libvirt_sanlock =
{ "disk_lease_dir" = "/var/lib/libvirt/sanlock" }
{ "host_id" = "1" }
{ "require_lease_for_disks" = "1" }
+{ "io_timeout" = "0" }
{ "user" = "root" }
{ "group" = "root" }
--
2.4.10
[libvirt] [PATCH v2] gobject: Add wrapper virDomainSetTime()
by Zeeshan Ali (Khattak)
---
This version:
* Replaces the seconds and nseconds arguments with a GDateTime.
* Drops the use of the flags argument, since the caller can request the only currently possible flag (VIR_DOMAIN_TIME_SYNC) simply by passing NULL as the GDateTime argument.
* Adds some needed articles to the doc comments.
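A minimal caller-side sketch of the synchronous API (hypothetical helper; it assumes a GVirDomain obtained elsewhere, e.g. via gvir_connection_find_domain_by_name()):

#include <libvirt-gobject/libvirt-gobject.h>

/* Push the host's current UTC time into the guest; passing NULL
 * instead of "now" would resync the guest clock from the RTC. */
static gboolean
sync_guest_clock(GVirDomain *dom, GError **err)
{
    GDateTime *now = g_date_time_new_now_utc();
    gboolean ok = gvir_domain_set_time(dom, now, 0, err);

    g_date_time_unref(now);
    return ok;
}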
libvirt-gobject/libvirt-gobject-domain.c | 121 +++++++++++++++++++++++++++++++
libvirt-gobject/libvirt-gobject-domain.h | 15 +++-
libvirt-gobject/libvirt-gobject.sym | 9 +++
3 files changed, 144 insertions(+), 1 deletion(-)
diff --git a/libvirt-gobject/libvirt-gobject-domain.c b/libvirt-gobject/libvirt-gobject-domain.c
index 34eb7ca..debae2d 100644
--- a/libvirt-gobject/libvirt-gobject-domain.c
+++ b/libvirt-gobject/libvirt-gobject-domain.c
@@ -1886,3 +1886,124 @@ gboolean gvir_domain_get_has_current_snapshot(GVirDomain *dom,
return TRUE;
}
+
+/**
+ * gvir_domain_set_time:
+ * @dom: the domain
+ * @date_time: (allow-none)(transfer none): the time to set as #GDateTime.
+ * @flags: Unused, Pass 0.
+ *
+ * This function tries to set guest time to the given value. The passed
+ * time must in UTC.
+ *
+ * If @date_time is %NULL, the time is reset using the domain's RTC.
+ *
+ * Please note that some hypervisors may require guest agent to be configured
+ * and running in order for this function to work.
+ */
+gboolean gvir_domain_set_time(GVirDomain *dom,
+ GDateTime *date_time,
+ guint flags G_GNUC_UNUSED,
+ GError **err)
+{
+ GVirDomainPrivate *priv;
+ int ret;
+ GTimeVal tv;
+ gint64 seconds;
+ guint nseconds;
+ guint settime_flags;
+
+ g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
+ g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
+
+ if (date_time != NULL) {
+ if (!g_date_time_to_timeval(date_time, &tv)) {
+ gvir_set_error_literal(err, GVIR_DOMAIN_ERROR,
+ 0,
+ "Failed to parse given time argument");
+ return FALSE;
+ }
+
+ seconds = tv.tv_sec;
+ nseconds = tv.tv_usec * 1000;
+ settime_flags = 0;
+ } else {
+ seconds = 0;
+ nseconds = 0;
+ settime_flags = VIR_DOMAIN_TIME_SYNC;
+ }
+
+ priv = dom->priv;
+ ret = virDomainSetTime(priv->handle, seconds, nseconds, settime_flags);
+ if (ret < 0) {
+ gvir_set_error_literal(err, GVIR_DOMAIN_ERROR,
+ 0,
+ "Unable to set domain time");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static void
+gvir_domain_set_time_helper(GTask *task,
+ gpointer object,
+ gpointer task_data,
+ GCancellable *cancellable G_GNUC_UNUSED)
+{
+ GVirDomain *dom = GVIR_DOMAIN(object);
+ GDateTime *date_time = (GDateTime *) task_data;
+ GError *err = NULL;
+
+ if (!gvir_domain_set_time(dom, date_time, 0, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
+}
+
+/**
+ * gvir_domain_set_time_async:
+ * @dom: the domain
+ * @date_time: (allow-none)(transfer none): the time to set as #GDateTime.
+ * @flags: bitwise-OR of #GVirDomainSetTimeFlags.
+ * @cancellable: (allow-none)(transfer none): cancellation object
+ * @callback: (scope async): completion callback
+ * @user_data: (closure): opaque data for callback
+ *
+ * Asynchronous variant of #gvir_domain_set_time.
+ */
+void gvir_domain_set_time_async(GVirDomain *dom,
+ GDateTime *date_time,
+ guint flags G_GNUC_UNUSED,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer user_data)
+{
+ GTask *task;
+
+ g_return_if_fail(GVIR_IS_DOMAIN(dom));
+ g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
+
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ if (date_time != NULL)
+ g_task_set_task_data(task,
+ g_date_time_ref(date_time),
+ (GDestroyNotify)g_date_time_unref);
+ g_task_run_in_thread(task, gvir_domain_set_time_helper);
+
+ g_object_unref(task);
+}
+
+gboolean gvir_domain_set_time_finish(GVirDomain *dom,
+ GAsyncResult *result,
+ GError **err)
+{
+ g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, G_OBJECT(dom)), FALSE);
+ g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
+
+ return g_task_propagate_boolean(G_TASK(result), err);
+}
diff --git a/libvirt-gobject/libvirt-gobject-domain.h b/libvirt-gobject/libvirt-gobject-domain.h
index 4fe381e..099cde3 100644
--- a/libvirt-gobject/libvirt-gobject-domain.h
+++ b/libvirt-gobject/libvirt-gobject-domain.h
@@ -215,7 +215,6 @@ typedef enum {
GVIR_DOMAIN_SNAPSHOT_LIST_EXTERNAL = VIR_DOMAIN_SNAPSHOT_LIST_EXTERNAL
} GVirDomainSnapshotListFlags;
-
typedef struct _GVirDomainInfo GVirDomainInfo;
struct _GVirDomainInfo
{
@@ -401,6 +400,20 @@ gboolean gvir_domain_get_has_current_snapshot(GVirDomain *dom,
gboolean *has_current_snapshot,
GError **error);
+gboolean gvir_domain_set_time(GVirDomain *dom,
+ GDateTime *date_time,
+ guint flags,
+ GError **err);
+void gvir_domain_set_time_async(GVirDomain *dom,
+ GDateTime *date_time,
+ guint flags,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer user_data);
+gboolean gvir_domain_set_time_finish(GVirDomain *dom,
+ GAsyncResult *result,
+ GError **err);
+
G_END_DECLS
#endif /* __LIBVIRT_GOBJECT_DOMAIN_H__ */
diff --git a/libvirt-gobject/libvirt-gobject.sym b/libvirt-gobject/libvirt-gobject.sym
index ca89a45..cbfaa71 100644
--- a/libvirt-gobject/libvirt-gobject.sym
+++ b/libvirt-gobject/libvirt-gobject.sym
@@ -304,4 +304,13 @@ LIBVIRT_GOBJECT_0.2.2 {
gvir_network_get_dhcp_leases;
} LIBVIRT_GOBJECT_0.2.1;
+LIBVIRT_GOBJECT_0.2.3 {
+ global:
+ gvir_domain_set_time_flags_get_type;
+
+ gvir_domain_set_time;
+ gvir_domain_set_time_async;
+ gvir_domain_set_time_finish;
+} LIBVIRT_GOBJECT_0.2.2;
+
# .... define new API here using predicted next version number ....
--
2.5.0
[libvirt] [PATCH] vz: add func to set shared drivers after libvirtd init
by Mikhail Feoktistov
Built-in drivers in libvirt are initialized before libvirtd itself initializes,
while shared drivers are only loaded during the libvirtd initialization step.
For the built-in drivers we therefore cannot set the shared drivers at that point,
because the shared drivers are not initialized yet.
This patch adds a function to set the shared drivers after libvirtd init.
---
daemon/libvirtd.c | 4 ++++
src/libvirt.c | 41 +++++++++++++++++++++++++++++++++++++++++
src/libvirt_internal.h | 1 +
src/libvirt_private.syms | 1 +
4 files changed, 47 insertions(+)
diff --git a/daemon/libvirtd.c b/daemon/libvirtd.c
index 250094b..aac1826 100644
--- a/daemon/libvirtd.c
+++ b/daemon/libvirtd.c
@@ -431,6 +431,10 @@ static void daemonInitialize(void)
bhyveRegister();
# endif
#endif
+# ifdef WITH_VZ
+ virAssignSharedDrivers("vz");
+ virAssignSharedDrivers("Parallels");
+# endif
}
diff --git a/src/libvirt.c b/src/libvirt.c
index 2602dde..4c4b7bd 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -1433,3 +1433,44 @@ virTypedParameterValidateSet(virConnectPtr conn,
}
return 0;
}
+
+/**
+ * virAssignSharedDrivers:
+ * @name: name of connection driver
+ *
+ * This function fills in any empty pointers for shared drivers
+ * in connect driver structure
+ *
+ * Returns 0 in case of success, -1 in case of error
+*/
+int
+virAssignSharedDrivers(const char *name)
+{
+ size_t i;
+
+ if (name == NULL) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Driver name must be specified"));
+ return -1;
+ }
+
+ for (i = 0; i < virConnectDriverTabCount; i++) {
+ if (STREQ(virConnectDriverTab[i]->hypervisorDriver->name, name)) {
+ if (virConnectDriverTab[i]->interfaceDriver == NULL)
+ virConnectDriverTab[i]->interfaceDriver = virSharedInterfaceDriver;
+ if (virConnectDriverTab[i]->networkDriver == NULL)
+ virConnectDriverTab[i]->networkDriver = virSharedNetworkDriver;
+ if (virConnectDriverTab[i]->nodeDeviceDriver == NULL)
+ virConnectDriverTab[i]->nodeDeviceDriver = virSharedNodeDeviceDriver;
+ if (virConnectDriverTab[i]->nwfilterDriver == NULL)
+ virConnectDriverTab[i]->nwfilterDriver = virSharedNWFilterDriver;
+ if (virConnectDriverTab[i]->secretDriver == NULL)
+ virConnectDriverTab[i]->secretDriver = virSharedSecretDriver;
+ if (virConnectDriverTab[i]->storageDriver == NULL)
+ virConnectDriverTab[i]->storageDriver = virSharedStorageDriver;
+ break;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/libvirt_internal.h b/src/libvirt_internal.h
index 1313b58..2a7227b 100644
--- a/src/libvirt_internal.h
+++ b/src/libvirt_internal.h
@@ -289,4 +289,5 @@ virTypedParameterValidateSet(virConnectPtr conn,
virTypedParameterPtr params,
int nparams);
+int virAssignSharedDrivers(const char *name);
#endif
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index be6ee19..340555a 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -943,6 +943,7 @@ virFDStreamSetInternalCloseCb;
# libvirt_internal.h
+virAssignSharedDrivers;
virConnectSupportsFeature;
virDomainMigrateBegin3;
virDomainMigrateBegin3Params;
--
1.8.3.1
[libvirt] [PATCH] libxl: open libxl log stream with libvirtd log_level
by Jim Fehlig
Instead of a hardcoded DEBUG log level, use the overall
daemon log level specified in libvirtd.conf when opening
a log stream with libxl. libxl is very verbose when the DEBUG
log level is set, resulting in huge log files that can
potentially fill a disk. Control of libxl verbosity should
be placed in the administrator's hands.
Signed-off-by: Jim Fehlig <jfehlig@suse.com>
---
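As a reference for the administrator-facing side, a sketch of the relevant libvirtd.conf setting (value meanings as documented in the stock config file):

# /etc/libvirt/libvirtd.conf
# log_level: 4 = errors, 3 = warnings, 2 = information, 1 = debug
# with this patch the libxl log stream follows the same level
log_level = 3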
src/libxl/libxl_conf.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
index a76ad5a..40fa4b5 100644
--- a/src/libxl/libxl_conf.c
+++ b/src/libxl/libxl_conf.c
@@ -1496,6 +1496,7 @@ libxlDriverConfigNew(void)
{
libxlDriverConfigPtr cfg;
char *log_file = NULL;
+ xentoollog_level log_level;
char ebuf[1024];
unsigned int free_mem;
@@ -1540,9 +1541,24 @@ libxlDriverConfigNew(void)
}
VIR_FREE(log_file);
+ switch (virLogGetDefaultPriority()) {
+ case VIR_LOG_DEBUG:
+ log_level = XTL_DEBUG;
+ break;
+ case VIR_LOG_INFO:
+ log_level = XTL_INFO;
+ break;
+ case VIR_LOG_WARN:
+ log_level = XTL_WARN;
+ break;
+ case VIR_LOG_ERROR:
+ log_level = XTL_ERROR;
+ break;
+ }
+
cfg->logger =
(xentoollog_logger *)xtl_createlogger_stdiostream(cfg->logger_file,
- XTL_DEBUG, XTL_STDIOSTREAM_SHOW_DATE);
+ log_level, XTL_STDIOSTREAM_SHOW_DATE);
if (!cfg->logger) {
VIR_ERROR(_("cannot create logger for libxenlight, disabling driver"));
goto error;
--
2.5.0
[libvirt] [sandbox] Weird apparmor problems
by Cedric Bosdonnat
Hi all,
I'm seeing weird apparmor errors when running virt-sandbox here. Here are the log entries:
apparmor="ALLOWED" operation="mknod" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/var/lib/libvirt/qemu/sandbox.monitor" pid=2251 comm="qemu-system-x86" requested_mask="c" denied_mask="c" fsuid=493 ouid=493
apparmor="ALLOWED" operation="open" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/dev/ptmx" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=0
apparmor="ALLOWED" operation="open" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/dev/pts/2" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=493
apparmor="ALLOWED" operation="file_perm" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/var/log/libvirt/qemu/sandbox.log" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=0
apparmor="ALLOWED" operation="open" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/dev/ptmx" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=0
apparmor="ALLOWED" operation="open" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/dev/pts/3" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=493
apparmor="ALLOWED" operation="file_perm" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/var/log/libvirt/qemu/sandbox.log" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=0
apparmor="ALLOWED" operation="open" parent=1 profile="libvirt-634ed189-cca0-4126-830c-4e4a76846b25" name="/dev/kvm" pid=2251 comm="qemu-system-x86" requested_mask="w" denied_mask="w" fsuid=493 ouid=0
The weird thing is that /dev/kvm, /var/log/libvirt/qemu/sandbox.log
and /var/lib/libvirt/qemu/sandbox.monitor already have rules.
And I'm wondering if it's normal to have write access to /dev/pts/*
and /dev/ptmx.
Any idea?
--
Cedric
[libvirt] strange stale qemu processes after domain shutdown
by Vasiliy Tolstov
I have 58 active domains with status running, and 62
qemu-system-x86_64 processes.
After investigating this issue, i found problem domains.
How to fix this issue and not lost this qemu processes?
ps auxww:
root 29561 0.2 0.2 1599628 743796 ? Sl Aug13 224:44
qemu-system-x86_64 -enable-kvm -name 29953 -S -machine
pc-i440fx-1.7,accel=kvm,usb=off -m 1024 -realtime
mlock=off -smp 1,sockets=1,cores=1,threads=1 -uuid
7ca8e593-29f7-6389-9b35-000071cc3e1e -no-user-config -nodefaults
-chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/29953.monitor,server,nowait
-mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc
-no-shutdown -boot strict=on -device
piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device
virtio-scsi-pci,id=scsi0,num_queues=1,bus=pci.0,addr=0x4 -device
virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x6 -drive
file=/dev/vg3/29953,if=none,id=drive-scsi0-0-0-0,format=raw,cache=none,discard=unmap,aio=native,iops=5000
-device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1
-drive if=none,id=drive-scsi0-0-1-0,readonly=on,format=raw -device
scsi-cd,bus=scsi0.0,channel=0,scsi-id=1,lun=0,drive=drive-scsi0-0-1-0,id=scsi0-0-1-0
-netdev tap,fd=353,id=hostnet0,vhost=on,vhostfd=354 -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=52:54:00:00:40:25,bus=pci.0,addr=0x3,rombar=0
-chardev pty,id=charserial0 -device
isa-serial,chardev=charserial0,id=serial0 -chardev
socket,id=charchannel0,path=/var/lib/libvirt/qemu/29953.agent,server,nowait
-device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0
-device usb-mouse,id=input0 -device usb-kbd,id=input1 -vnc
[::]:23,password -device VGA,id=video0,vgamem_mb=16,bus=pci.0,addr=0x2
-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5 -object
rng-random,id=objrng0,filename=/dev/random -device
virtio-rng-pci,rng=objrng0,id=rng0,max-bytes=1024,period=2000,bus=pci.0,addr=0x7
-msg timestamp=on
libvirt log contains:
2015-10-13 06:52:05.504+0000: starting up libvirt version: 1.2.16,
qemu version: 2.3.0 (Debian
2.3.0-2+0~20150518103251.26+wheezy~1.gbp820cc6)
LC_ALL=C PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
HOME=/root USER=root LOGNAME=root QEMU_AUDIO_DRV=none /usr/bin/kvm
-name 29953 -S -machine pc-i440fx-1.7,accel=kvm,usb=off -m 1024
-realtime mlock=off -smp 1,sockets=1,cores=1,threads=1 -uuid
7ca8e593-29f7-6389-9b35-000071cc3e1e -no-user-config -nodefaults
-chardev socket,id=charmonitor,path=/var/lib/libvirt/qemu/29953.monitor,server,nowait
-mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc
-no-shutdown -boot strict=on
-device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2 -device
virtio-scsi-pci,id=scsi0,num_queues=1,bus=pci.0,addr=0x4 -device
virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=0x6 -drive
file=/dev/vg3/29953,if=none,id=drive-scsi0-0-0-0,format=raw,cache=none,discard=unmap,aio=native,iops=5000
-device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=1
-drive if=none,id=drive-scsi0-0-1-0,readonly=on,format=raw -device
scsi-cd,bus=scsi0.0,channel=0,scsi-id=1,lun=0,drive=drive-scsi0-0-1-0,id=scsi0-0-1-0
-netdev tap,fd=92,id=hostnet0,vhost=on,vhostfd=109 -device
virtio-net-pci,netdev=hostnet0,id=net0,mac=52:54:00:00:40:25,bus=pci.0,addr=0x3,rombar=0
-chardev pty,id=charserial0 -device
isa-serial,chardev=charserial0,id=serial0 -chardev
socket,id=charchannel0,path=/var/lib/libvirt/qemu/29953.agent,server,nowait
-device virtserialport,bus=virtio-serial0.0,nr=1,chardev=charchannel0,id=channel0,name=org.qemu.guest_agent.0
-device usb-mouse,id=input0 -device usb-kbd,id=input1 -vnc
[::]:46,password -device VGA,id=video0,vgamem_mb=16,bus=pci.0,addr=0x2
-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5 -object
rng-random,id=objrng0,filename=/dev/random -device
virtio-rng-pci,rng=objrng0,id=rng0,max-bytes=1024,period=2000,bus=pci.0,addr=0x7
-msg timestamp=on
Domain id=3262 is tainted: high-privileges
char device redirected to /dev/pts/45 (label charserial0)
qemu: terminating on signal 15 from pid 14945
2015-10-14 19:32:06.672+0000: shutting down
--
Vasiliy Tolstov,
e-mail: v.tolstov@selfip.ru
[libvirt] [PATCH 0/2] Hyper-v crash feature support
by Dmitry Andreev
A new Hyper-V CPU feature, 'hv_crash', was added to QEMU. The feature
will become available in QEMU v2.5.0.
This patch series adds support for this feature.
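For illustration, the kind of domain XML this is expected to enable, assuming the new element follows the naming of the existing hyperv sub-features (hypothetical until patch 1/2 defines it); libvirt would then translate it into hv_crash on the QEMU -cpu line:

<features>
  <hyperv>
    <relaxed state='on'/>
    <crash state='on'/>
  </hyperv>
</features>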
Dmitry Andreev (2):
conf: add crash to hyperv features
qemu: add hv_crash support
docs/formatdomain.html.in | 7 +++++++
docs/schemas/domaincommon.rng | 5 +++++
src/conf/domain_conf.c | 6 +++++-
src/conf/domain_conf.h | 1 +
src/qemu/qemu_command.c | 2 ++
tests/qemuxml2argvdata/qemuxml2argv-hyperv-off.xml | 1 +
tests/qemuxml2argvdata/qemuxml2argv-hyperv.args | 4 ++--
tests/qemuxml2argvdata/qemuxml2argv-hyperv.xml | 1 +
8 files changed, 24 insertions(+), 3 deletions(-)
--
1.8.3.1
[libvirt] Assert with libvirt + xen hvm
by CloudPatch Staff
We're hitting an assert whenever we try to create an HVM instance under Xen
via libvirtd.
System is running on Gentoo, package information as follows:
app-emulation/xen-4.5.0 USE="api debug flask hvm pam pygrub python qemu
screen"
app-emulation/xen-tools-4.5.0 USE="api debug flask hvm pam pygrub python
qemu screen"
app-emulation/libvirt-1.2.11-r2:0/1.2.11 USE="caps libvirtd lvm macvtap nls
qemu udev vepa virtualbox xen"
The following commands are run in parallel:
vmmachine ~ # libvirtd --listen
2015-01-22 16:33:13.596+0000: 2620: info : libvirt version: 1.2.11
2015-01-22 16:33:13.596+0000: 2620: error : udevGetDMIData:1607 : Failed to
get udev device for syspath '/sys/devices/virtual/dmi/id' or
'/sys/class/dmi/id'
libvirtd: libxl_fork.c:350: sigchld_installhandler_core: Assertion
`((void)"application must negotiate with libxl about SIGCHLD",
!(sigchld_saved_action.sa_flags & 4) &&
(sigchld_saved_action.__sigaction_handler.sa_handler == ((__sighandler_t)
0) || sigchld_saved_action.__sigaction_handler.sa_handler ==
((__sighandler_t) 1)))' failed.
Aborted
vmmachine ~ # VIRSH_DEBUG=0 virsh create xml
create: file(optdata): xml
libvirt: XML-RPC error : End of file while reading data: Input/output error
error: Failed to create domain from xml
error: End of file while reading data: Input/output error
libvirt: Domain Config error : Requested operation is not valid: A
different callback was requested
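For context, the assertion text points at libxl's SIGCHLD ownership negotiation. A rough sketch of what that negotiation looks like from an embedding application, assuming the libxl_childproc_setmode() API from libxl_event.h (illustrative only, not a confirmed fix for this report):

#include <libxl.h>
#include <libxl_event.h>

/* Declare who owns SIGCHLD before libxl forks any children; the
 * exact owner value depends on how the application reaps its own
 * child processes. */
static const libxl_childproc_hooks child_hooks = {
    .chldowner = libxl_sigchld_owner_libxl,
};

static void setup_libxl_children(libxl_ctx *ctx)
{
    libxl_childproc_setmode(ctx, &child_hooks, NULL);
}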
[libvirt] [PATCH 00/10] Allow memory hotplug without NUMA on ppc64
by Peter Krempa
Peter Krempa (10):
conf: Make @def const in virDomainDefGetMemoryInitial
conf: Turn targetNode in struct virDomainMemoryDef to signed
qemu: command: Make qemuBuildMemoryBackendStr usable without NUMA
qemu: command: Always execute memory device formatter
qemu: domain: Add common function to perform memory hotplug checks
qemu: command: Move dimm device checks from formatter to checker
qemu: domain: Remove memory device check from post parse callback
conf: Prepare making memory device target node optional
qemu: command: Prepare memory device def formatter for missing target
node
qemu: ppc64: Support memory hotplug without NUMA enabled
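To make the intent of the series concrete, a sketch of the kind of domain XML this should accept on a non-NUMA ppc64 guest once the target node becomes optional (element names follow the existing memory hotplug schema, sizes are hypothetical):

<maxMemory slots='16' unit='KiB'>4194304</maxMemory>
<memory unit='KiB'>1048576</memory>
<devices>
  <memory model='dimm'>
    <target>
      <size unit='KiB'>524288</size>
      <!-- no <node/> required once the series is applied -->
    </target>
  </memory>
</devices>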
docs/formatdomain.html.in | 5 +-
docs/schemas/domaincommon.rng | 8 +-
src/conf/domain_conf.c | 18 ++-
src/conf/domain_conf.h | 4 +-
src/qemu/qemu_command.c | 143 +++++------------
src/qemu/qemu_command.h | 4 +-
src/qemu/qemu_domain.c | 177 ++++++++++++++++++++-
src/qemu/qemu_domain.h | 4 +
src/qemu/qemu_hotplug.c | 11 +-
.../qemuxml2argv-memory-hotplug-ppc64-nonuma.args | 19 +++
.../qemuxml2argv-memory-hotplug-ppc64-nonuma.xml | 38 +++++
tests/qemuxml2argvtest.c | 2 +
12 files changed, 296 insertions(+), 137 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-ppc64-nonuma.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-ppc64-nonuma.xml
--
2.4.5
[libvirt] [PATCH] vz: support cpu time in driver's domainGetInfo
by Nikolay Shirokovskiy
Just a straightforward patch.
Use reference counting for privdom, since collecting the stats could internally drop the domain lock.
Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy@virtuozzo.com>
---
src/vz/vz_driver.c | 19 ++++++++++++++++---
1 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index 6f1cbfb..0a968b9 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -554,7 +554,7 @@ vzDomainGetInfo(virDomainPtr domain, virDomainInfoPtr info)
virDomainObjPtr privdom;
int ret = -1;
- if (!(privdom = vzDomObjFromDomain(domain)))
+ if (!(privdom = vzDomObjFromDomainRef(domain)))
goto cleanup;
info->state = virDomainObjGetState(privdom, NULL);
@@ -562,11 +562,24 @@ vzDomainGetInfo(virDomainPtr domain, virDomainInfoPtr info)
info->maxMem = virDomainDefGetMemoryActual(privdom->def);
info->nrVirtCpu = privdom->def->vcpus;
info->cpuTime = 0;
+
+ if (virDomainObjIsActive(privdom)) {
+ unsigned long long vtime;
+ size_t i;
+
+ for (i = 0; i < privdom->def->vcpus; ++i) {
+ if (prlsdkGetVcpuStats(privdom, i, &vtime) < 0) {
+ virReportError(VIR_ERR_OPERATION_FAILED, "%s",
+ _("cannot read cputime for domain"));
+ goto cleanup;
+ }
+ info->cpuTime += vtime;
+ }
+ }
ret = 0;
cleanup:
- if (privdom)
- virObjectUnlock(privdom);
+ virDomainObjEndAPI(&privdom);
return ret;
}
--
1.7.1