RPC client implementation uses the following paradigm. The critical
section is organized via virObjectLock(client)/virObjectUnlock(client)
braces. Though this is potentially problematic as
    main thread:                             side thread:
                                             virObjectLock(client);
    virObjectUnlock(client);
                                             g_main_loop_quit(client->eventLoop);
                                             virObjectUnlock(client);
    g_main_loop_run(client->eventLoop);
Here the side thread issues g_main_loop_quit() before the main thread has
entered g_main_loop_run(), so the wakeup is lost.
This means in particular that if the main thread is executing a very long
request, such as a VM migration, the wakeup from the side thread can be
stuck until the main request has fully completed.
For us this meant that Nova reported the host as inaccessible. In any
case, the problem is easily reproducible with simple Python scripts doing
slow and fast requests in parallel from two different threads.
This problem has been introduced with the rework for dropping gnulib
inside the following commit:
commit 7d4350bcac251bab2ecf85bd19eb1181db87fd07
Author: Daniel P. Berrangé <berrange(a)redhat.com>
Date: Thu Jan 16 11:21:44 2020 +0000
rpc: convert RPC client to use GMainLoop instead of poll
The cure is to return to the old approach and use a file descriptor for
wakeup notifications. The code as written is well suited for Linux, while
it is unclear how it will behave on Windows.
Signed-off-by: Denis V. Lunev <den(a)openvz.org>
---
src/rpc/virnetclient.c | 39 ++++++++++++++++++++++++++++++++++++++-
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/src/rpc/virnetclient.c b/src/rpc/virnetclient.c
index d9a508246f..7fff5a7017 100644
--- a/src/rpc/virnetclient.c
+++ b/src/rpc/virnetclient.c
@@ -90,6 +90,7 @@ struct _virNetClient {
GMainLoop *eventLoop;
GMainContext *eventCtx;
+ int pipeLoopNotify[2];
/*
* List of calls currently waiting for dispatch
@@ -150,10 +151,25 @@ void virNetClientSetCloseCallback(virNetClient *client,
}
+static gboolean
+virNetClientIOEventNotify(int fd G_GNUC_UNUSED,
+ GIOCondition ev G_GNUC_UNUSED,
+ gpointer opaque)
+{
+ virNetClient *client = opaque;
+ char buf[1024];
+
+ while (saferead(client->pipeLoopNotify[0], buf, sizeof(buf)) > 0);
+ g_main_loop_quit(client->eventLoop);
+
+ return G_SOURCE_CONTINUE;
+}
+
static void
virNetClientIOWakeup(virNetClient *client)
{
- g_main_loop_quit(client->eventLoop);
+ char c = 0;
+ ignore_value(safewrite(client->pipeLoopNotify[1], &c, sizeof(c)));
}
@@ -305,10 +321,15 @@ static virNetClient *virNetClientNew(virNetSocket *sock,
const char *hostname)
{
virNetClient *client = NULL;
+ int pipenotify[2] = {-1, -1};
+ g_autoptr(GSource) G_GNUC_UNUSED source = NULL;
if (virNetClientInitialize() < 0)
goto error;
+ if (virPipeNonBlock(pipenotify) < 0)
+ goto error;
+
if (!(client = virObjectLockableNew(virNetClientClass)))
goto error;
@@ -320,12 +341,25 @@ static virNetClient *virNetClientNew(virNetSocket *sock,
client->hostname = g_strdup(hostname);
+ client->pipeLoopNotify[0] = pipenotify[0];
+ client->pipeLoopNotify[1] = pipenotify[1];
+
+ /* FIXME: good for Unix, buggy for Windows */
+ source = virEventGLibAddSocketWatch(pipenotify[0],
+ G_IO_IN | G_IO_ERR | G_IO_HUP,
+ client->eventCtx,
+ virNetClientIOEventNotify,
+ client, NULL);
+
PROBE(RPC_CLIENT_NEW,
"client=%p sock=%p",
client, client->sock);
return client;
error:
+ VIR_FORCE_CLOSE(pipenotify[0]);
+ VIR_FORCE_CLOSE(pipenotify[1]);
+
virObjectUnref(client);
virObjectUnref(sock);
return NULL;
@@ -759,6 +793,9 @@ void virNetClientDispose(void *obj)
g_main_loop_unref(client->eventLoop);
g_main_context_unref(client->eventCtx);
+ VIR_FORCE_CLOSE(client->pipeLoopNotify[0]);
+ VIR_FORCE_CLOSE(client->pipeLoopNotify[1]);
+
g_free(client->hostname);
if (client->sock)
--
2.34.1