If the libvirt daemon is restarted while there is (at least) one
unresponsive qemu process, the startup procedure hangs. This patch
creates one thread per VM in which we try to reconnect to the
monitor. Therefore, blocking in one thread will not affect other APIs.
---
src/qemu/qemu_driver.c | 23 +++---------
src/qemu/qemu_process.c | 87 ++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 85 insertions(+), 25 deletions(-)
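
Not part of the patch: below is a minimal standalone sketch of the idea,
using plain pthreads instead of libvirt's virThread and driver structures.
The names fake_monitor_open, reconnect_thread and struct vm are made up
for illustration only. Each VM gets its own thread, and the shared
"driver" lock is dropped around the potentially blocking connect, so one
stuck qemu stalls only its own thread:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins for the driver lock and per-VM object. */
static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;

struct vm {
    const char *name;
    int stuck;          /* simulate an unresponsive qemu */
};

/* Pretend to open the monitor socket; may block for a long time. */
static int fake_monitor_open(struct vm *vm)
{
    if (vm->stuck)
        sleep(30);      /* an unresponsive qemu would hang here */
    return vm->stuck ? -1 : 0;
}

static void *reconnect_thread(void *opaque)
{
    struct vm *vm = opaque;

    pthread_mutex_lock(&driver_lock);
    /* ... per-VM setup that needs the driver lock ... */

    /* Lightweight "MonitorEnter": drop the lock so a blocked
     * connect cannot stall every other thread. */
    pthread_mutex_unlock(&driver_lock);
    int rc = fake_monitor_open(vm);
    pthread_mutex_lock(&driver_lock);

    /* ... finish the reconnect under the lock again ... */
    pthread_mutex_unlock(&driver_lock);

    printf("%s: reconnect %s\n", vm->name, rc == 0 ? "ok" : "failed");
    return NULL;
}

int main(void)
{
    struct vm vms[] = { { "vm1", 0 }, { "vm2", 1 }, { "vm3", 0 } };
    pthread_t tids[3];

    /* One thread per VM: vm2 blocks, vm1 and vm3 finish regardless. */
    for (int i = 0; i < 3; i++)
        pthread_create(&tids[i], NULL, reconnect_thread, &vms[i]);
    for (int i = 0; i < 3; i++)
        pthread_join(tids[i], NULL);
    return 0;
}

With a serial startup loop the stuck vm2 would delay vm1 and vm3 for the
whole timeout; with one thread per VM they complete immediately while
only vm2's thread waits.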
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 421a98e..4574b6c 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -143,26 +143,15 @@ qemuAutostartDomain(void *payload, const void *name ATTRIBUTE_UNUSED, void *opaq
virDomainObjLock(vm);
virResetLastError();
- if (qemuDomainObjBeginJobWithDriver(data->driver, vm,
- QEMU_JOB_MODIFY) < 0) {
+ if (vm->autostart &&
+ !virDomainObjIsActive(vm) &&
+ qemuDomainObjStart(data->conn, data->driver, vm,
+ false, false,
+ data->driver->autoStartBypassCache) < 0) {
err = virGetLastError();
- VIR_ERROR(_("Failed to start job on VM '%s': %s"),
+ VIR_ERROR(_("Failed to autostart VM '%s': %s"),
vm->def->name,
err ? err->message : _("unknown error"));
- } else {
- if (vm->autostart &&
- !virDomainObjIsActive(vm) &&
- qemuDomainObjStart(data->conn, data->driver, vm,
- false, false,
- data->driver->autoStartBypassCache) < 0) {
- err = virGetLastError();
- VIR_ERROR(_("Failed to autostart VM '%s': %s"),
- vm->def->name,
- err ? err->message : _("unknown error"));
- }
-
- if (qemuDomainObjEndJob(data->driver, vm) == 0)
- vm = NULL;
}
if (vm)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 21e73a5..1daf6ae 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -820,6 +820,7 @@ qemuConnectMonitor(struct qemud_driver *driver, virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
+ qemuMonitorPtr mon = NULL;
if (virSecurityManagerSetSocketLabel(driver->securityManager, vm) < 0) {
VIR_ERROR(_("Failed to set security context for monitor for %s"),
@@ -831,15 +832,29 @@ qemuConnectMonitor(struct qemud_driver *driver, virDomainObjPtr vm)
* deleted while the monitor is active */
virDomainObjRef(vm);
- priv->mon = qemuMonitorOpen(vm,
- priv->monConfig,
- priv->monJSON,
- &monitorCallbacks);
+ ignore_value(virTimeMs(&priv->monStart));
+ virDomainObjUnlock(vm);
+ qemuDriverUnlock(driver);
+
+ mon = qemuMonitorOpen(vm,
+ priv->monConfig,
+ priv->monJSON,
+ &monitorCallbacks);
+
+ qemuDriverLock(driver);
+ virDomainObjLock(vm);
+ priv->monStart = 0;
/* Safe to ignore value since ref count was incremented above */
- if (priv->mon == NULL)
+ if (mon == NULL)
ignore_value(virDomainObjUnref(vm));
+ if (!virDomainObjIsActive(vm)) {
+ qemuMonitorClose(mon);
+ goto error;
+ }
+ priv->mon = mon;
+
if (virSecurityManagerClearSocketLabel(driver->securityManager, vm) < 0) {
VIR_ERROR(_("Failed to clear security context for monitor for %s"),
vm->def->name);
@@ -2484,21 +2499,25 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
struct qemuProcessReconnectData {
virConnectPtr conn;
struct qemud_driver *driver;
+ void *payload;
};
/*
* Open an existing VM's monitor, re-detect VCPU threads
* and re-reserve the security labels in use
*/
static void
-qemuProcessReconnect(void *payload, const void *name ATTRIBUTE_UNUSED, void *opaque)
+qemuProcessReconnect(void *opaque)
{
- virDomainObjPtr obj = payload;
struct qemuProcessReconnectData *data = opaque;
struct qemud_driver *driver = data->driver;
+ virDomainObjPtr obj = data->payload;
qemuDomainObjPrivatePtr priv;
virConnectPtr conn = data->conn;
struct qemuDomainJobObj oldjob;
+ VIR_FREE(data);
+
+ qemuDriverLock(driver);
virDomainObjLock(obj);
qemuDomainObjRestoreJob(obj, &oldjob);
@@ -2572,12 +2591,15 @@ qemuProcessReconnect(void *payload, const void *name ATTRIBUTE_UNUSED, void *opa
if (virDomainObjUnref(obj) > 0)
virDomainObjUnlock(obj);
+ qemuDriverUnlock(driver);
+
return;
error:
if (!virDomainObjIsActive(obj)) {
if (virDomainObjUnref(obj) > 0)
virDomainObjUnlock(obj);
+ qemuDriverUnlock(driver);
return;
}
@@ -2591,6 +2613,55 @@ error:
else
virDomainObjUnlock(obj);
}
+ qemuDriverUnlock(driver);
+}
+
+static void
+qemuProcessReconnectHelper(void *payload,
+ const void *name ATTRIBUTE_UNUSED,
+ void *opaque)
+{
+ virThread thread;
+ struct qemuProcessReconnectData *src = opaque;
+ struct qemuProcessReconnectData *data;
+
+ if (VIR_ALLOC(data) < 0) {
+ virReportOOMError();
+ return;
+ }
+
+ memcpy(data, src, sizeof(*data));
+ data->payload = payload;
+
+ /* This iterator is called with the driver locked.
+ * However, we need to unlock it so that qemuProcessReconnect,
+ * which will run in a separate thread, can lock it itself
+ * (if needed). The whole idea behind qemuProcessReconnect:
+ * 1. lock driver
+ * 2. just before monitor reconnect, do a lightweight MonitorEnter
+ * (unlock VM & driver)
+ * 3. reconnect to monitor
+ * 4. do a lightweight MonitorExit (lock driver & VM)
+ * 5. continue the reconnect process
+ * 6. unlock driver
+ *
+ * It is necessary to NOT hold the driver lock for the entire run
+ * of reconnect, otherwise we would get blocked if there is an
+ * unresponsive qemu.
+ * However, iterating over the hash table MUST be done with the
+ * driver locked.
+ *
+ * NB, we can't do a normal MonitorEnter & MonitorExit because
+ * those two lock the monitor lock, which does not exist in
+ * this early phase.
+ */
+ qemuDriverUnlock(src->driver);
+ if (virThreadCreate(&thread, true, qemuProcessReconnect, data) < 0) {
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Could not create thread. QEMU initialization "
+ "might be incomplete"));
+ }
+ qemuDriverLock(src->driver);
}
/**
@@ -2603,7 +2674,7 @@ void
qemuProcessReconnectAll(virConnectPtr conn, struct qemud_driver *driver)
{
struct qemuProcessReconnectData data = {conn, driver};
- virHashForEach(driver->domains.objs, qemuProcessReconnect, &data);
+ virHashForEach(driver->domains.objs, qemuProcessReconnectHelper, &data);
}
int qemuProcessStart(virConnectPtr conn,
--
1.7.3.4