watchdog fed successfully event of 6300esb
by ligang (P)
Hi folks,
I have a question to discuss about the 6300esb watchdog.
I would like to discuss whether it is possible for qemu to send an event when the watchdog is successfully fed by the VM for the first time.
Here is the situation:
Qemu will send a VIR_DOMAIN_EVENT_ID_WATCHDOG event when the watchdog times out, and if the watchdog action in the VM's XML was set to "reset", the VM will be rebooted on timeout.
I have a monitor process that registers a callback function for the VIR_DOMAIN_EVENT_ID_WATCHDOG event; the callback function sends an alarm to my upper-layer monitoring platform indicating that the VM is faulty, and the cluster deploying business on the VM will isolate the VM based on the alarm.
And after the VM is rebooted, the monitor process will receive a reboot event and send it to the platform; the upper-layer monitoring platform will clear the alarm, and business continues to run on the VM.
In most cases, the watchdog process in the VM will feed the watchdog after the VM reboots and everything goes back on track.
In some other cases, the guest OS may fail to start (in my environment the VM failed to start due to an I/O error), but the reboot event will still be received and the alarm will be cleared while the VM is still faulty. So it may not be a good idea to clear the alarm based on the reboot event.
So, I think it would be helpful if qemu could send an event when the watchdog is successfully fed by the VM for the first time. Then I would know for certain that the guest OS is back up and running and the watchdog initialized successfully.
Or is there any other opinion about this situation?
Thanks.
4 years, 3 months
[PATCH v2] qemu_namespace: Don't leak mknod items that are being skipped over
by Michal Privoznik
When building and populating domain NS a couple of functions are
called that append paths to a string list. This string list is
then inspected, one item at a time, by
qemuNamespacePrepareOneItem() which gathers all the info for
given path (stat buffer, possible link target, ACLs, SELinux
label) using qemuNamespaceMknodItemInit(). If the path needs to
be created in the domain's private /dev then it's added onto this
qemuNamespaceMknodData list which is freed later in the process.
But, if the path does not need to be created in the domain's
private /dev, then the memory allocated by
qemuNamespaceMknodItemInit() is not freed anywhere leading to a
leak.
Signed-off-by: Michal Privoznik <mprivozn(a)redhat.com>
---
v2 of:
https://www.redhat.com/archives/libvir-list/2020-September/msg00248.html
diff to v1:
- autocleanup approach is implemented in qemuNamespacePrepareOneItem()
src/qemu/qemu_namespace.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/src/qemu/qemu_namespace.c b/src/qemu/qemu_namespace.c
index 87f4fd8d58..b0d1d0d083 100644
--- a/src/qemu/qemu_namespace.c
+++ b/src/qemu/qemu_namespace.c
@@ -871,7 +871,7 @@ qemuDomainNamespaceAvailable(qemuDomainNamespace ns G_GNUC_UNUSED)
typedef struct _qemuNamespaceMknodItem qemuNamespaceMknodItem;
typedef qemuNamespaceMknodItem *qemuNamespaceMknodItemPtr;
struct _qemuNamespaceMknodItem {
- const char *file;
+ char *file;
char *target;
bool bindmounted;
GStatBuf sb;
@@ -892,6 +892,7 @@ struct _qemuNamespaceMknodData {
static void
qemuNamespaceMknodItemClear(qemuNamespaceMknodItemPtr item)
{
+ VIR_FREE(item->file);
VIR_FREE(item->target);
virFileFreeACLs(&item->acl);
#ifdef WITH_SELINUX
@@ -900,6 +901,8 @@ qemuNamespaceMknodItemClear(qemuNamespaceMknodItemPtr item)
}
+G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(qemuNamespaceMknodItem, qemuNamespaceMknodItemClear);
+
static void
qemuNamespaceMknodDataClear(qemuNamespaceMknodDataPtr data)
{
@@ -1091,7 +1094,7 @@ qemuNamespaceMknodItemInit(qemuNamespaceMknodItemPtr item,
bool isLink;
bool needsBindMount;
- item->file = file;
+ item->file = g_strdup(file);
if (g_lstat(file, &item->sb) < 0) {
if (errno == ENOENT)
@@ -1166,11 +1169,13 @@ qemuNamespacePrepareOneItem(qemuNamespaceMknodDataPtr data,
size_t ndevMountsPath)
{
long ttl = sysconf(_SC_SYMLOOP_MAX);
- const char *next = file;
+ g_autofree char *next = g_strdup(file);
size_t i;
while (1) {
- qemuNamespaceMknodItem item = { 0 };
+ g_auto(qemuNamespaceMknodItem) item = { 0 };
+ bool isLink;
+ bool addToData = false;
int rc;
rc = qemuNamespaceMknodItemInit(&item, cfg, vm, next);
@@ -1182,6 +1187,8 @@ qemuNamespacePrepareOneItem(qemuNamespaceMknodDataPtr data,
return -1;
}
+ isLink = S_ISLNK(item.sb.st_mode);
+
if (STRPREFIX(next, QEMU_DEVPREFIX)) {
for (i = 0; i < ndevMountsPath; i++) {
if (STREQ(devMountsPath[i], "/dev"))
@@ -1190,12 +1197,18 @@ qemuNamespacePrepareOneItem(qemuNamespaceMknodDataPtr data,
break;
}
- if (i == ndevMountsPath &&
- VIR_APPEND_ELEMENT_COPY(data->items, data->nitems, item) < 0)
- return -1;
+ if (i == ndevMountsPath)
+ addToData = true;
}
- if (!S_ISLNK(item.sb.st_mode))
+ g_free(next);
+ next = g_strdup(item.target);
+
+ if (addToData &&
+ VIR_APPEND_ELEMENT(data->items, data->nitems, item) < 0)
+ return -1;
+
+ if (!isLink)
break;
if (ttl-- == 0) {
@@ -1204,8 +1217,6 @@ qemuNamespacePrepareOneItem(qemuNamespaceMknodDataPtr data,
next);
return -1;
}
-
- next = item.target;
}
return 0;
--
2.26.2
4 years, 3 months
[PATCH] qemu: Allow setting affinity to fail and don't report error
by Martin Kletzander
This is just a clean-up of commit 3791f29b085c using the new parameter of
virProcessSetAffinity() introduced in commit 9514e24984ee so that there is
no error reported in the logs.
Signed-off-by: Martin Kletzander <mkletzan(a)redhat.com>
---
src/qemu/qemu_process.c | 65 +++++++++++++++++++----------------------
1 file changed, 30 insertions(+), 35 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 04e5cbb65969..e29f35e10576 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2569,24 +2569,21 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm)
return -1;
}
+ /*
+ * We only want to error out if we failed to set the affinity to
+ * user-requested mapping. If we are just trying to reset the affinity
+ * to all CPUs and this fails it can only be an issue if:
+ * 1) libvirtd does not have CAP_SYS_NICE
+ * 2) libvirtd does not run on all CPUs
+ *
+ * This scenario can easily occurr when libvirtd is run inside a
+ * container with restrictive permissions and CPU pinning.
+ *
+ * See also: https://bugzilla.redhat.com/1819801#c2
+ */
if (cpumapToSet &&
- virProcessSetAffinity(vm->pid, cpumapToSet, false) < 0) {
- /*
- * We only want to error out if we failed to set the affinity to
- * user-requested mapping. If we are just trying to reset the affinity
- * to all CPUs and this fails it can only be an issue if:
- * 1) libvirtd does not have CAP_SYS_NICE
- * 2) libvirtd does not run on all CPUs
- *
- * This scenario can easily occurr when libvirtd is run inside a
- * container with restrictive permissions and CPU pinning.
- *
- * See also: https://bugzilla.redhat.com/1819801#c2
- */
- if (settingAll)
- virResetLastError();
- else
- return -1;
+ virProcessSetAffinity(vm->pid, cpumapToSet, settingAll) < 0) {
+ return -1;
}
return 0;
@@ -2739,25 +2736,23 @@ qemuProcessSetupPid(virDomainObjPtr vm,
if (!affinity_cpumask)
affinity_cpumask = use_cpumask;
- /* Setup legacy affinity. */
+ /* Setup legacy affinity.
+ *
+ * We only want to error out if we failed to set the affinity to
+ * user-requested mapping. If we are just trying to reset the affinity
+ * to all CPUs and this fails it can only be an issue if:
+ * 1) libvirtd does not have CAP_SYS_NICE
+ * 2) libvirtd does not run on all CPUs
+ *
+ * This scenario can easily occurr when libvirtd is run inside a
+ * container with restrictive permissions and CPU pinning.
+ *
+ * See also: https://bugzilla.redhat.com/1819801#c2
+ */
if (affinity_cpumask &&
- virProcessSetAffinity(pid, affinity_cpumask, false) < 0) {
- /*
- * We only want to error out if we failed to set the affinity to
- * user-requested mapping. If we are just trying to reset the affinity
- * to all CPUs and this fails it can only be an issue if:
- * 1) libvirtd does not have CAP_SYS_NICE
- * 2) libvirtd does not run on all CPUs
- *
- * This scenario can easily occurr when libvirtd is run inside a
- * container with restrictive permissions and CPU pinning.
- *
- * See also: https://bugzilla.redhat.com/1819801#c2
- */
- if (affinity_cpumask == hostcpumap)
- virResetLastError();
- else
- goto cleanup;
+ virProcessSetAffinity(pid, affinity_cpumask,
+ affinity_cpumask == hostcpumap) < 0) {
+ goto cleanup;
}
/* Set scheduler type and priority, but not for the main thread. */
--
2.28.0
4 years, 3 months
[PATCH] Do not report error when setting affinity is allowed to fail
by Martin Kletzander
Suggested-by: Ján Tomko <jtomko(a)redhat.com>
Signed-off-by: Martin Kletzander <mkletzan(a)redhat.com>
---
src/lxc/lxc_controller.c | 2 +-
src/qemu/qemu_driver.c | 7 ++++---
src/qemu/qemu_process.c | 10 +++++-----
src/util/virprocess.c | 32 +++++++++++++++++++++++---------
src/util/virprocess.h | 2 +-
5 files changed, 34 insertions(+), 19 deletions(-)
diff --git a/src/lxc/lxc_controller.c b/src/lxc/lxc_controller.c
index 0a496fb7886c..37a28ac2f3c1 100644
--- a/src/lxc/lxc_controller.c
+++ b/src/lxc/lxc_controller.c
@@ -775,7 +775,7 @@ static int virLXCControllerSetupCpuAffinity(virLXCControllerPtr ctrl)
* so use '0' to indicate our own process ID. No threads are
* running at this point
*/
- if (virProcessSetAffinity(0 /* Self */, cpumapToSet) < 0) {
+ if (virProcessSetAffinity(0 /* Self */, cpumapToSet, false) < 0) {
virBitmapFree(cpumap);
return -1;
}
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index ce72e1021d16..a792a1283253 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4529,7 +4529,8 @@ qemuDomainPinVcpuLive(virDomainObjPtr vm,
goto cleanup;
}
- if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu), cpumap) < 0)
+ if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu),
+ cpumap, false) < 0)
goto cleanup;
}
@@ -4747,7 +4748,7 @@ qemuDomainPinEmulator(virDomainPtr dom,
}
}
- if (virProcessSetAffinity(vm->pid, pcpumap) < 0)
+ if (virProcessSetAffinity(vm->pid, pcpumap, false) < 0)
goto endjob;
virBitmapFree(def->cputune.emulatorpin);
@@ -5222,7 +5223,7 @@ qemuDomainPinIOThread(virDomainPtr dom,
}
}
- if (virProcessSetAffinity(iothrid->thread_id, pcpumap) < 0)
+ if (virProcessSetAffinity(iothrid->thread_id, pcpumap, false) < 0)
goto endjob;
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 17d083d192a5..423c60c7c85b 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2573,7 +2573,7 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm)
}
if (cpumapToSet &&
- virProcessSetAffinity(vm->pid, cpumapToSet) < 0) {
+ virProcessSetAffinity(vm->pid, cpumapToSet, false) < 0) {
/*
* We only want to error out if we failed to set the affinity to
* user-requested mapping. If we are just trying to reset the affinity
@@ -2743,7 +2743,8 @@ qemuProcessSetupPid(virDomainObjPtr vm,
affinity_cpumask = use_cpumask;
/* Setup legacy affinity. */
- if (affinity_cpumask && virProcessSetAffinity(pid, affinity_cpumask) < 0) {
+ if (affinity_cpumask &&
+ virProcessSetAffinity(pid, affinity_cpumask, false) < 0) {
/*
* We only want to error out if we failed to set the affinity to
* user-requested mapping. If we are just trying to reset the affinity
@@ -2751,9 +2752,8 @@ qemuProcessSetupPid(virDomainObjPtr vm,
* 1) libvirtd does not have CAP_SYS_NICE
* 2) libvirtd does not run on all CPUs
*
- * However since this scenario is very improbable, we rather skip
- * reporting the error because it helps running libvirtd in a a scenario
- * where pinning is handled by someone else.
+ * This scenario can easily occurr when libvirtd is run inside a
+ * container with restrictive permissions and CPU pinning.
*
* See also: https://bugzilla.redhat.com/1819801#c2
*/
diff --git a/src/util/virprocess.c b/src/util/virprocess.c
index 9de356505104..e9df56389621 100644
--- a/src/util/virprocess.c
+++ b/src/util/virprocess.c
@@ -441,7 +441,7 @@ int virProcessKillPainfully(pid_t pid, bool force)
#if WITH_SCHED_GETAFFINITY
-int virProcessSetAffinity(pid_t pid, virBitmapPtr map)
+int virProcessSetAffinity(pid_t pid, virBitmapPtr map, bool quiet)
{
size_t i;
int numcpus = 1024;
@@ -479,9 +479,14 @@ int virProcessSetAffinity(pid_t pid, virBitmapPtr map)
numcpus = numcpus << 2;
goto realloc;
}
- virReportSystemError(errno,
- _("cannot set CPU affinity on process %d"), pid);
- return -1;
+ if (quiet) {
+ VIR_DEBUG("cannot set CPU affinity on process %d: %s",
+ pid, g_strerror(errno));
+ } else {
+ virReportSystemError(errno,
+ _("cannot set CPU affinity on process %d"), pid);
+ return -1;
+ }
}
CPU_FREE(mask);
@@ -533,7 +538,8 @@ virProcessGetAffinity(pid_t pid)
#elif defined(WITH_BSD_CPU_AFFINITY)
int virProcessSetAffinity(pid_t pid,
- virBitmapPtr map)
+ virBitmapPtr map,
+ bool quiet)
{
size_t i;
cpuset_t mask;
@@ -546,9 +552,14 @@ int virProcessSetAffinity(pid_t pid,
if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid,
sizeof(mask), &mask) != 0) {
- virReportSystemError(errno,
- _("cannot set CPU affinity on process %d"), pid);
- return -1;
+ if (quiet) {
+ VIR_DEBUG("cannot set CPU affinity on process %d: %s",
+ pid, g_strerror(errno));
+ } else {
+ virReportSystemError(errno,
+ _("cannot set CPU affinity on process %d"), pid);
+ return -1;
+ }
}
return 0;
@@ -582,8 +593,11 @@ virProcessGetAffinity(pid_t pid)
#else /* WITH_SCHED_GETAFFINITY */
int virProcessSetAffinity(pid_t pid G_GNUC_UNUSED,
- virBitmapPtr map G_GNUC_UNUSED)
+ virBitmapPtr map G_GNUC_UNUSED,
+ bool quiet G_GNUC_UNUSED)
{
+ /* The @quiet parameter is ignored here, it is used only for silencing
+ * actual failures. */
virReportSystemError(ENOSYS, "%s",
_("Process CPU affinity is not supported on this platform"));
return -1;
diff --git a/src/util/virprocess.h b/src/util/virprocess.h
index 437deb18305b..34210d6c9d62 100644
--- a/src/util/virprocess.h
+++ b/src/util/virprocess.h
@@ -58,7 +58,7 @@ int virProcessKillPainfullyDelay(pid_t pid,
bool force,
unsigned int extradelay);
-int virProcessSetAffinity(pid_t pid, virBitmapPtr map);
+int virProcessSetAffinity(pid_t pid, virBitmapPtr map, bool quiet);
virBitmapPtr virProcessGetAffinity(pid_t pid);
--
2.28.0
4 years, 3 months
Various issues when using multiple graphic outputs
by Christian Ehrhardt
Hi,
I've had continuous issues with this and wanted to reach out
to ask whether this is a common issue everyone hits, or just me lacking a little
detail in my setup.
Setup:
- tried qemu up to 4.2
- tried libvirt up to 6.0
- virt-viewer up to 7.0-2build1
- virt-manager up to 2.2.1
- I plan to retry with qemu 5.0, libvirt 6.6 and virt-viewer 9.0 but I
don't have all pieces ready yet.
- get a Desktop Guest (I've seen it with Ubuntu and Windows, therefore
I'm rather sure it will affect all desktops)
- configure your guest-xml to use multiple graphic adapters and start it
- connect with virt-viewer to your guest which will open both screens
The exact issue depends on the kind of graphic device I configure.
The following list is ordered in terms of increasing confusion :-):
- QXL with multiple heads
- works fine (so multi display in general seems fine for the
involved components)
- VGA + QXL, QXL + Virtio, Virtio + QXL:
- Guest only detects primary display
- Can't convince X in the guest to use the second device as well
- lspci lists both devices just fine
- dmesg shows the kernel is initializing them, e.g. drm for virtio
- 2*QXL
- X has issues to initialize on user login
- no error in the log, just hung
- QXL + Mdev
- mouse handling seems broken
- in gnome I see no mouse pointer at all
- in windows it is just strange
- has offsets relative to mouse pointer entering
- and can't move in one random direction (e.g. not up)
- keyboard works just fine
- I have played with adding/removing different input devices without
much success.
- I disabled the qxl display via xorg conf without the case getting any better
The last case is what I initially tried and totally confused me at first.
And to be clear, each of those graphics adapter types works fine if being
attached "alone" on the guest (Except mdev alone, as I'm unable to convince
libvirt to only attach the mdev without adding back a qxl graphics adapter).
The Guest XML is the default that virt-manager gives me plus adding the second
graphics adapter (example 2*QXL: https://paste.ubuntu.com/p/cVw8GVZ9dD/).
If this is known in some way or even "yeah try version XY of component Foo" I'm
happy about any hint/pointer you can give me. But if there is a chance that I'm
the only one seeing it I'd like to understand why.
Thanks in advance for thinking into this with me,
Christian
P.S. to avoid cross-posting I'll start with libvirt as it is in the
middle of all involved components.
4 years, 3 months
[PATCH] Fix linkage to libutil and libkvm on FreeBSD 11
by Daniel P. Berrangé
We are currently adding -lutil and -lkvm to the linker using the
add_project_link_arguments method. On FreeBSD 11.4, this results in
build errors because the args appear too early in the command line.
We need to pass the libraries as dependencies so that they get placed
at the same point in the linker args as other dependencies.
Signed-off-by: Daniel P. Berrangé <berrange(a)redhat.com>
---
meson.build | 16 +++++++---------
src/bhyve/meson.build | 15 +++++++++++----
src/util/meson.build | 26 +++++++++++++++++---------
3 files changed, 35 insertions(+), 22 deletions(-)
Using the CI patch I posted earlier to add FreeBSD 11:
https://gitlab.com/berrange/libvirt/-/pipelines/185834799
diff --git a/meson.build b/meson.build
index 1eadea33bf..c30ff187aa 100644
--- a/meson.build
+++ b/meson.build
@@ -1086,7 +1086,8 @@ endif
# Check for BSD kvm (kernel memory interface)
if host_machine.system() == 'freebsd'
kvm_dep = cc.find_library('kvm')
- add_project_link_arguments('-lkvm', language: 'c')
+else
+ kvm_dep = disabler()
endif
libiscsi_version = '1.18.0'
@@ -1203,11 +1204,9 @@ have_gnu_gettext_tools = false
if not get_option('nls').disabled()
have_gettext = cc.has_function('gettext')
if not have_gettext
- intl_lib = cc.find_library('intl', required: false)
- have_gettext = intl_lib.found()
- if have_gettext
- add_project_link_arguments('-lintl', language: 'c')
- endif
+ intl_dep = cc.find_library('intl', required: false)
+ else
+ intl_dep = disabler()
endif
if not have_gettext and get_option('nls').enabled()
error('gettext() is required to build libvirt')
@@ -1235,6 +1234,8 @@ if not get_option('nls').disabled()
have_gnu_gettext_tools = true
endif
endif
+else
+ intl_dep = disabler()
endif
numactl_dep = cc.find_library('numa', required: get_option('numactl'))
@@ -1402,9 +1403,6 @@ if udev_dep.found()
endif
util_dep = cc.find_library('util', required: false)
-if util_dep.found()
- add_project_link_arguments('-lutil', language: 'c')
-endif
if not get_option('virtualport').disabled()
if cc.has_header_symbol('linux/if_link.h', 'IFLA_PORT_MAX')
diff --git a/src/bhyve/meson.build b/src/bhyve/meson.build
index 7d54718820..c382f64aee 100644
--- a/src/bhyve/meson.build
+++ b/src/bhyve/meson.build
@@ -14,15 +14,22 @@ driver_source_files += bhyve_sources
stateful_driver_source_files += bhyve_sources
if conf.has('WITH_BHYVE')
+ bhyve_driver_deps = [
+ access_dep,
+ src_dep,
+ ]
+ if kvm_dep.found()
+ bhyve_driver_deps += kvm_dep
+ endif
+ if util_dep.found()
+ bhyve_driver_deps += util_dep
+ endif
bhyve_driver_impl = static_library(
'virt_driver_bhyve_impl',
[
bhyve_sources,
],
- dependencies: [
- access_dep,
- src_dep,
- ],
+ dependencies: bhyve_driver_deps,
include_directories: [
conf_inc_dir,
hypervisor_inc_dir,
diff --git a/src/util/meson.build b/src/util/meson.build
index f7092cc3f1..c899f232e6 100644
--- a/src/util/meson.build
+++ b/src/util/meson.build
@@ -172,15 +172,7 @@ io_helper_sources = [
'iohelper.c',
]
-virt_util_lib = static_library(
- 'virt_util',
- [
- util_sources,
- util_public_sources,
- keycode_gen_sources,
- dtrace_gen_headers,
- ],
- dependencies: [
+virt_util_deps = [
acl_dep,
audit_dep,
capng_dep,
@@ -195,7 +187,23 @@ virt_util_lib = static_library(
thread_dep,
win32_dep,
yajl_dep,
+ ]
+if util_dep.found()
+ virt_util_deps += util_dep
+endif
+if intl_dep.found()
+ virt_util_deps += intl_dep
+endif
+
+virt_util_lib = static_library(
+ 'virt_util',
+ [
+ util_sources,
+ util_public_sources,
+ keycode_gen_sources,
+ dtrace_gen_headers,
],
+ dependencies: virt_util_deps,
)
libvirt_libs += virt_util_lib
--
2.26.2
4 years, 3 months
[PATCH v2 00/13] resolve hangs/crashes on libvirtd shutdown
by Nikolay Shirokovskiy
I keep qemu VM event loop exiting synchronously but add code to avoid deadlock
that can be caused by this approach. I guess it is worth having synchronous
exiting of threads in this case to avoid crashes.
Patches that have already been positively reviewed carry the appropriate 'Reviewed-by' lines.
Changes from v1:
- rename stateShutdown to state stateShutdownPrepare
- introduce net daemon shutdown callbacks
- make some adjustments in terms of qemu per VM's event loop thread
finishing
- factor out net server shutdown facilities into distinct patch
- increase shutdown timeout from 15s to 30s
Nikolay Shirokovskiy (13):
libvirt: add stateShutdownPrepare/stateShutdownWait to drivers
util: always initialize priority condition
util: add stop/drain functions to thread pool
rpc: don't unref service ref on socket behalf twice
rpc: add virNetDaemonSetShutdownCallbacks
rpc: add shutdown facilities to netserver
rpc: finish all threads before exiting main loop
qemu: don't shutdown event thread in monitor EOF callback
vireventthread: exit thread synchronously on finalize
qemu: avoid deadlock in qemuDomainObjStopWorker
qemu: implement driver's shutdown/shutdown wait methods
rpc: cleanup virNetDaemonClose method
util: remove unused virThreadPoolNew macro
scripts/check-drivername.py | 2 +
src/driver-state.h | 8 ++++
src/libvirt.c | 42 ++++++++++++++++
src/libvirt_internal.h | 2 +
src/libvirt_private.syms | 4 ++
src/libvirt_remote.syms | 2 +-
src/qemu/qemu_domain.c | 18 +++++--
src/qemu/qemu_driver.c | 32 +++++++++++++
src/qemu/qemu_process.c | 3 --
src/remote/remote_daemon.c | 6 +--
src/rpc/virnetdaemon.c | 109 ++++++++++++++++++++++++++++++++++++------
src/rpc/virnetdaemon.h | 8 +++-
src/rpc/virnetserver.c | 8 ++++
src/rpc/virnetserver.h | 1 +
src/rpc/virnetserverservice.c | 1 -
src/util/vireventthread.c | 1 +
src/util/virthreadpool.c | 65 +++++++++++++++++--------
src/util/virthreadpool.h | 6 +--
18 files changed, 267 insertions(+), 51 deletions(-)
--
1.8.3.1
4 years, 3 months
[libvirt PATCH] qemu: migration: remove unused variable
by Ján Tomko
../src/qemu/qemu_migration.c:4091:36: error: unused variable 'cfg' [-Werror,-Wunused-variable]
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
Signed-off-by: Ján Tomko <jtomko(a)redhat.com>
Fixes: d92c2bbc6597fcb951b303a9122ec1ca71514d10
---
Pushed.
src/qemu/qemu_migration.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 03d6a522e7..a530c17582 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -4088,7 +4088,6 @@ qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
{
int ret = -1;
qemuMigrationSpec spec;
- g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
int fds[2] = { -1, -1 };
VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
--
2.26.2
4 years, 3 months
[PATCH] qemu: Do not error out when setting affinity failed
by Martin Kletzander
At least in a particular scenario described in the code. Basically when
libvirtd is running without CAP_SYS_NICE (e.g. in a container) and it is trying
to set QEMU affinity to all CPUs (because there is no setting requested in the
XML) it fails. But if we ignore the failure in this particular case than you
can limit the CPUs used by controlling the affinity for libvirtd itself.
In any other case (anything requested in the XML, pinning a live domain, etc.)
the call is still considered fatal and the action errors out.
Resolves: https://bugzilla.redhat.com/1819801
Suggested-by: Daniel P. Berrangé <berrange(a)redhat.com>
Signed-off-by: Martin Kletzander <mkletzan(a)redhat.com>
---
src/qemu/qemu_process.c | 41 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 38 insertions(+), 3 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index cfe09d632633..270bb37d3682 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2528,6 +2528,7 @@ qemuProcessGetAllCpuAffinity(virBitmapPtr *cpumapRet)
static int
qemuProcessInitCpuAffinity(virDomainObjPtr vm)
{
+ bool settingAll = false;
g_autoptr(virBitmap) cpumapToSet = NULL;
virDomainNumatuneMemMode mem_mode;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -2566,13 +2567,30 @@ qemuProcessInitCpuAffinity(virDomainObjPtr vm)
if (!(cpumapToSet = virBitmapNewCopy(vm->def->cputune.emulatorpin)))
return -1;
} else {
+ settingAll = true;
if (qemuProcessGetAllCpuAffinity(&cpumapToSet) < 0)
return -1;
}
if (cpumapToSet &&
virProcessSetAffinity(vm->pid, cpumapToSet) < 0) {
- return -1;
+ /*
+ * We only want to error out if we failed to set the affinity to
+ * user-requested mapping. If we are just trying to reset the affinity
+ * to all CPUs and this fails it can only be an issue if:
+ * 1) libvirtd does not have CAP_SYS_NICE
+ * 2) libvirtd does not run on all CPUs
+ *
+ * However since this scenario is very improbable, we rather skip
+ * reporting the error because it helps running libvirtd in a a scenario
+ * where pinning is handled by someone else.
+ *
+ * See also: https://bugzilla.redhat.com/1819801#c2
+ */
+ if (settingAll)
+ virResetLastError();
+ else
+ return -1;
}
return 0;
@@ -2726,8 +2744,25 @@ qemuProcessSetupPid(virDomainObjPtr vm,
affinity_cpumask = use_cpumask;
/* Setup legacy affinity. */
- if (affinity_cpumask && virProcessSetAffinity(pid, affinity_cpumask) < 0)
- goto cleanup;
+ if (affinity_cpumask && virProcessSetAffinity(pid, affinity_cpumask) < 0) {
+ /*
+ * We only want to error out if we failed to set the affinity to
+ * user-requested mapping. If we are just trying to reset the affinity
+ * to all CPUs and this fails it can only be an issue if:
+ * 1) libvirtd does not have CAP_SYS_NICE
+ * 2) libvirtd does not run on all CPUs
+ *
+ * However since this scenario is very improbable, we rather skip
+ * reporting the error because it helps running libvirtd in a a scenario
+ * where pinning is handled by someone else.
+ *
+ * See also: https://bugzilla.redhat.com/1819801#c2
+ */
+ if (affinity_cpumask == hostcpumap)
+ virResetLastError();
+ else
+ goto cleanup;
+ }
/* Set scheduler type and priority, but not for the main thread. */
if (sched &&
--
2.28.0
4 years, 3 months
[PATCH] qemu: Fix comment in qemuProcessSetupPid
by Martin Kletzander
This was supposed to be done in commit 3791f29b085c, but I missed a spot.
Signed-off-by: Martin Kletzander <mkletzan(a)redhat.com>
---
Pushed as trivial, also suggested here:
https://www.redhat.com/archives/libvir-list/2020-September/msg00275.html
src/qemu/qemu_process.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 17d083d192a5..934fab98bc42 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2751,9 +2751,8 @@ qemuProcessSetupPid(virDomainObjPtr vm,
* 1) libvirtd does not have CAP_SYS_NICE
* 2) libvirtd does not run on all CPUs
*
- * However since this scenario is very improbable, we rather skip
- * reporting the error because it helps running libvirtd in a a scenario
- * where pinning is handled by someone else.
+ * This scenario can easily occurr when libvirtd is run inside a
+ * container with restrictive permissions and CPU pinning.
*
* See also: https://bugzilla.redhat.com/1819801#c2
*/
--
2.28.0
4 years, 3 months