[libvirt] [PATCH 1/1] qemu: Tidy up job handling during live migration

During a QEMU live migration several warning messages about job handling
could be written to syslog on the destination host:

"entering monitor without asking for a nested job is dangerous"

The messages are written because the job handling during migration uses
hard coded asyncJob values in several places that are incorrect. This patch
passes the required asyncJob value around and prevents the warnings as well
as any issues that the warnings may be referring to.

Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com>
---
 src/qemu/qemu_domain.c    |  5 +++--
 src/qemu/qemu_domain.h    |  2 +-
 src/qemu/qemu_driver.c    | 21 ++++++++++++---------
 src/qemu/qemu_migration.c |  3 ++-
 src/qemu/qemu_process.c   | 33 ++++++++++++++++++---------------
 src/qemu/qemu_process.h   |  1 +
 6 files changed, 37 insertions(+), 28 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 4f63c88..3abbb14 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -2497,7 +2497,8 @@ qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
 
 int
 qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
-                           virDomainObjPtr vm)
+                           virDomainObjPtr vm,
+                           int asyncJob)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     char **aliases;
@@ -2505,7 +2506,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
         return 0;
 
-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
     if (qemuMonitorGetDeviceAliases(priv->mon, &aliases) < 0) {
         qemuDomainObjExitMonitor(driver, vm);
         return -1;
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 67972b9..8736889 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -369,7 +369,7 @@ extern virDomainXMLNamespace virQEMUDriverDomainXMLNamespace;
 extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig;
 
 int qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
-                               virDomainObjPtr vm);
+                               virDomainObjPtr vm, int asyncJob);
 
 bool qemuDomainDefCheckABIStability(virQEMUDriverPtr driver,
                                     virDomainDefPtr src,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 33541d3..b0439d2 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1616,7 +1616,8 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
         goto cleanup;
     }
 
-    if (qemuProcessStart(conn, driver, vm, NULL, -1, NULL, NULL,
+    if (qemuProcessStart(conn, driver, vm, QEMU_ASYNC_JOB_NONE,
+                         NULL, -1, NULL, NULL,
                          VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
                          start_flags) < 0) {
         virDomainAuditStart(vm, "booted", false);
@@ -5446,7 +5447,8 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
     }
 
     /* Set the migration source and start it up. */
-    ret = qemuProcessStart(conn, driver, vm, "stdio", *fd, path, NULL,
+    ret = qemuProcessStart(conn, driver, vm, QEMU_ASYNC_JOB_NONE,
+                           "stdio", *fd, path, NULL,
                            VIR_NETDEV_VPORT_PROFILE_OP_RESTORE,
                            VIR_QEMU_PROCESS_START_PAUSED);
 
@@ -6143,7 +6145,8 @@ qemuDomainObjStart(virConnectPtr conn,
         }
     }
 
-    ret = qemuProcessStart(conn, driver, vm, NULL, -1, NULL, NULL,
+    ret = qemuProcessStart(conn, driver, vm, QEMU_ASYNC_JOB_NONE,
+                           NULL, -1, NULL, NULL,
                            VIR_NETDEV_VPORT_PROFILE_OP_CREATE, start_flags);
     virDomainAuditStart(vm, "booted", ret >= 0);
     if (ret >= 0) {
@@ -6500,7 +6503,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm,
     }
 
     if (ret == 0)
-        qemuDomainUpdateDeviceList(driver, vm);
+        qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE);
 
     return ret;
 }
@@ -6560,7 +6563,7 @@ qemuDomainDetachDeviceLive(virDomainObjPtr vm,
     }
 
     if (ret == 0)
-        qemuDomainUpdateDeviceList(driver, vm);
+        qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE);
 
     return ret;
 }
@@ -14101,8 +14104,8 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     if (config)
         virDomainObjAssignDef(vm, config, false, NULL);
 
-    rc = qemuProcessStart(snapshot->domain->conn,
-                          driver, vm, NULL, -1, NULL, snap,
+    rc = qemuProcessStart(snapshot->domain->conn, driver, vm,
+                          QEMU_ASYNC_JOB_NONE, NULL, -1, NULL, snap,
                           VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
                           VIR_QEMU_PROCESS_START_PAUSED);
     virDomainAuditStart(vm, "from-snapshot", rc >= 0);
@@ -14195,8 +14198,8 @@ static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     if (event)
         qemuDomainEventQueue(driver, event);
 
-    rc = qemuProcessStart(snapshot->domain->conn,
-                          driver, vm, NULL, -1, NULL, NULL,
+    rc = qemuProcessStart(snapshot->domain->conn, driver, vm,
+                          QEMU_ASYNC_JOB_NONE, NULL, -1, NULL, NULL,
                           VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
                           start_flags);
     virDomainAuditStart(vm, "from-snapshot", rc >= 0);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 767d840..1c46b34 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2480,7 +2480,8 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Start the QEMU daemon, with the same command-line arguments plus
      * -incoming $migrateFrom
      */
-    if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
+    if (qemuProcessStart(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+                         migrateFrom, dataFD[0], NULL, NULL,
                          VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                          VIR_QEMU_PROCESS_START_PAUSED |
                          VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 8a6b384..229de6d 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1444,7 +1444,8 @@ static qemuMonitorCallbacks monitorCallbacks = {
 };
 
 static int
-qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int logfd)
+qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
+                   int logfd)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     int ret = -1;
@@ -1495,7 +1496,7 @@ qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int logfd)
     }
 
 
-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
     ret = qemuMonitorSetCapabilities(priv->mon);
     if (ret == 0 &&
         virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON))
@@ -1901,6 +1902,7 @@ qemuProcessFindCharDevicePTYs(virDomainObjPtr vm,
 static int
 qemuProcessWaitForMonitor(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
+                          int asyncJob,
                           virQEMUCapsPtr qemuCaps,
                           off_t pos)
 {
@@ -1926,7 +1928,7 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver,
     }
 
     VIR_DEBUG("Connect monitor to %p '%s'", vm, vm->def->name);
-    if (qemuConnectMonitor(driver, vm, logfd) < 0)
+    if (qemuConnectMonitor(driver, vm, asyncJob, logfd) < 0)
         goto cleanup;
 
     /* Try to get the pty path mappings again via the monitor. This is much more
@@ -1938,7 +1940,7 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver,
         goto cleanup;
 
     priv = vm->privateData;
-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
     ret = qemuMonitorGetPtyPaths(priv->mon, paths);
     qemuDomainObjExitMonitor(driver, vm);
 
@@ -1984,13 +1986,13 @@ qemuProcessWaitForMonitor(virQEMUDriverPtr driver,
 
 static int
 qemuProcessDetectVcpuPIDs(virQEMUDriverPtr driver,
-                          virDomainObjPtr vm)
+                          virDomainObjPtr vm, int asyncJob)
 {
     pid_t *cpupids = NULL;
     int ncpupids;
     qemuDomainObjPrivatePtr priv = vm->privateData;
 
-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
     /* failure to get the VCPU<-> PID mapping or to execute the query
      * command will not be treated fatal as some versions of qemu don't
      * support this command */
@@ -3150,7 +3152,7 @@ qemuProcessUpdateDevices(virQEMUDriverPtr driver,
     old = priv->qemuDevices;
     priv->qemuDevices = NULL;
 
-    if (qemuDomainUpdateDeviceList(driver, vm) < 0)
+    if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
         goto cleanup;
 
     if ((tmp = old)) {
@@ -3216,7 +3218,7 @@ qemuProcessReconnect(void *opaque)
     virObjectRef(obj);
 
     /* XXX check PID liveliness & EXE path */
-    if (qemuConnectMonitor(driver, obj, -1) < 0)
+    if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, -1) < 0)
         goto error;
 
     /* Failure to connect to agent shouldn't be fatal */
@@ -3655,6 +3657,7 @@ qemuProcessVerifyGuestCPU(virQEMUDriverPtr driver, virDomainObjPtr vm)
 int qemuProcessStart(virConnectPtr conn,
                      virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
+                     int asyncJob,
                      const char *migrateFrom,
                      int stdin_fd,
                      const char *stdin_path,
@@ -4137,7 +4140,7 @@ int qemuProcessStart(virConnectPtr conn,
         goto cleanup;
 
     VIR_DEBUG("Waiting for monitor to show up");
-    if (qemuProcessWaitForMonitor(driver, vm, priv->qemuCaps, pos) < 0)
+    if (qemuProcessWaitForMonitor(driver, vm, asyncJob, priv->qemuCaps, pos) < 0)
         goto cleanup;
 
     /* Failure to connect to agent shouldn't be fatal */
@@ -4160,7 +4163,7 @@ int qemuProcessStart(virConnectPtr conn,
         goto cleanup;
 
     VIR_DEBUG("Detecting VCPU PIDs");
-    if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
+    if (qemuProcessDetectVcpuPIDs(driver, vm, asyncJob) < 0)
         goto cleanup;
 
     VIR_DEBUG("Setting cgroup for each VCPU (if required)");
@@ -4195,7 +4198,7 @@ int qemuProcessStart(virConnectPtr conn,
     /* qemu doesn't support setting this on the command line, so
      * enter the monitor */
     VIR_DEBUG("Setting network link states");
-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
     if (qemuProcessSetLinkStates(vm) < 0) {
         qemuDomainObjExitMonitor(driver, vm);
         goto cleanup;
@@ -4204,7 +4207,7 @@ int qemuProcessStart(virConnectPtr conn,
     qemuDomainObjExitMonitor(driver, vm);
 
     VIR_DEBUG("Fetching list of active devices");
-    if (qemuDomainUpdateDeviceList(driver, vm) < 0)
+    if (qemuDomainUpdateDeviceList(driver, vm, asyncJob) < 0)
         goto cleanup;
 
     /* Technically, qemuProcessStart can be called from inside
@@ -4219,7 +4222,7 @@ int qemuProcessStart(virConnectPtr conn,
                        vm->def->mem.cur_balloon);
         goto cleanup;
     }
-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
     if (vm->def->memballoon && vm->def->memballoon->period)
         qemuMonitorSetMemoryStatsPeriod(priv->mon,
                                         vm->def->memballoon->period);
     if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) {
@@ -4764,7 +4767,7 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
     vm->pid = pid;
 
     VIR_DEBUG("Waiting for monitor to show up");
-    if (qemuProcessWaitForMonitor(driver, vm, priv->qemuCaps, -1) < 0)
+    if (qemuProcessWaitForMonitor(driver, vm, QEMU_ASYNC_JOB_NONE, priv->qemuCaps, -1) < 0)
         goto error;
 
     /* Failure to connect to agent shouldn't be fatal */
@@ -4779,7 +4782,7 @@ int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
     }
 
     VIR_DEBUG("Detecting VCPU PIDs");
-    if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
+    if (qemuProcessDetectVcpuPIDs(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
         goto error;
 
     /* If we have -device, then addresses are assigned explicitly.
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 9c78736..5948ea4 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -53,6 +53,7 @@ typedef enum {
 int qemuProcessStart(virConnectPtr conn,
                      virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
+                     int asyncJob,
                      const char *migrateFrom,
                      int stdin_fd,
                      const char *stdin_path,
-- 
2.0.2.731.g247b4d5

On 07/29/2014 02:41 AM, Sam Bobroff wrote:
During a QEMU live migration several warning messages about job handling could be written to syslog on the destination host:
"entering monitor without asking for a nested job is dangerous"
The messages are written because the job handling during migration uses hard coded asyncJob values in several places that are incorrect.
This patch passes the required asyncJob value around and prevents the warnings as well as any issues that the warnings may be referring to.
Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com> --
This patch seems to fix the deadlock that can happen if the migrated domain is destroyed at the destination, reported here:
https://www.redhat.com/archives/libvir-list/2014-May/msg00236.html

It looks good to me, but it seems there are more functions calling qemuDomainObjEnterMonitor that can be called from qemuProcessStart, for example qemuDomainChangeGraphicsPasswords.
@@ -2505,7 +2506,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
         return 0;

-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
Also, the return value of this call could be dangerous to ignore if asyncJob != NONE.

Jan

On 30/07/14 01:52, Ján Tomko wrote:
On 07/29/2014 02:41 AM, Sam Bobroff wrote:
During a QEMU live migration several warning messages about job handling could be written to syslog on the destination host:
"entering monitor without asking for a nested job is dangerous"
The messages are written because the job handling during migration uses hard coded asyncJob values in several places that are incorrect.
This patch passes the required asyncJob value around and prevents the warnings as well as any issues that the warnings may be referring to.
Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com> --
This patch seems to fix the deadlock that can happen if the migrated domain is destroyed at the destination reported here: https://www.redhat.com/archives/libvir-list/2014-May/msg00236.html
It looks good to me, but it seems there are more functions calling qemuDomainObjEnterMonitor that can be called from qemuProcessStart, for example qemuDomainChangeGraphicsPasswords.
Yes, I was fairly sure there would be other cases; I just fixed all the ones that actually occurred during my tests.

In fact it seems like for the cases I'm looking at here, where it's the async job owner thread that's using the EnterMonitor functions, passing asyncJob around is a waste of time anyway because we know the correct value of asyncJob to use: it's stored in priv->job.asyncJob. Why not have qemuDomainObjEnterMonitorInternal() automatically switch to creating a nested job in this case? It seems easy to do and would simplify some code as well; what do you think?
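Roughly what I have in mind (only a sketch, reusing the field and helper names quoted elsewhere in this thread; a real version would also have to check that the current thread actually owns the async job):

static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
                                  virDomainObjPtr obj,
                                  qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    /* Sketch: if the caller didn't name an async job but this domain has
     * one active, assume a nested job is wanted instead of warning about
     * a plain monitor entry. */
    if (asyncJob == QEMU_ASYNC_JOB_NONE &&
        priv->job.asyncJob != QEMU_ASYNC_JOB_NONE)
        asyncJob = priv->job.asyncJob;

    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
        if (qemuDomainObjBeginNestedJob(driver, obj, asyncJob) < 0)
            return -1;
    }
    ...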
@@ -2505,7 +2506,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
         return 0;

-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
Also, the return value of this call could be dangerous to ignore if asyncJob != NONE.
True, but the patch hasn't introduced this, and the full story is even worse ;-)

void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
                               virDomainObjPtr obj)
{
    ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
                                                   QEMU_ASYNC_JOB_NONE));
}
Jan
Cheers, Sam.

On 08/01/2014 03:12 AM, Sam Bobroff wrote:
On 30/07/14 01:52, Ján Tomko wrote:
On 07/29/2014 02:41 AM, Sam Bobroff wrote:
During a QEMU live migration several warning messages about job handling could be written to syslog on the destination host:
"entering monitor without asking for a nested job is dangerous"
The messages are written because the job handling during migration uses hard coded asyncJob values in several places that are incorrect.
This patch passes the required asyncJob value around and prevents the warnings as well as any issues that the warnings may be referring to.
Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com> --
This patch seems to fix the deadlock that can happen if the migrated domain is destroyed at the destination reported here: https://www.redhat.com/archives/libvir-list/2014-May/msg00236.html
It looks good to me, but it seems there are more functions calling qemuDomainObjEnterMonitor that can be called from qemuProcessStart, for example qemuDomainChangeGraphicsPasswords.
Yes, I was fairly sure there would be other cases; I just fixed all the ones that actually occurred during my tests.
In fact it seems like for the cases I'm looking at here, where it's the async job owner thread that's using the EnterMonitor functions, passing asyncJob around is a waste of time anyway because we know the correct value of asyncJob to use: it's stored in priv->job.asyncJob.
Why not have qemuDomainObjEnterMonitorInternal() automatically switch to creating a nested job in this case?
It seems easy to do and would simplify some code as well; what do you think?
We've had it that way before - it didn't work that well: http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=193cd0f3
@@ -2505,7 +2506,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
         return 0;

-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
Also, the return value of this call could be dangerous to ignore if asyncJob != NONE.
True, but the patch hasn't introduced this, and the full story is even worse ;-)
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
                               virDomainObjPtr obj)
{
    ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
                                                   QEMU_ASYNC_JOB_NONE));
}
qemuDomainObjEnterMonitorInternal is called with QEMU_ASYNC_JOB_NONE here. It always returns 0 in that case and it's safe to ignore. The problem is when you use other async jobs:

static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
                                  virDomainObjPtr obj,
                                  qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
        int ret;
        if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
            return ret;
        if (!virDomainObjIsActive(obj)) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("domain is no longer running"));
            /* Still referenced by the containing async job. */
            ignore_value(qemuDomainObjEndJob(driver, obj));
            return -1;
        }
    }
    ...

Jan

On 02/08/14 00:04, Ján Tomko wrote:
On 08/01/2014 03:12 AM, Sam Bobroff wrote:
On 30/07/14 01:52, Ján Tomko wrote:
On 07/29/2014 02:41 AM, Sam Bobroff wrote:
During a QEMU live migration several warning messages about job handling could be written to syslog on the destination host:
"entering monitor without asking for a nested job is dangerous"
The messages are written because the job handling during migration uses hard coded asyncJob values in several places that are incorrect.
This patch passes the required asyncJob value around and prevents the warnings as well as any issues that the warnings may be referring to.
Signed-off-by: Sam Bobroff <sam.bobroff@au1.ibm.com> --
This patch seems to fix the deadlock that can happen if the migrated domain is destroyed at the destination reported here: https://www.redhat.com/archives/libvir-list/2014-May/msg00236.html
It looks good to me, but it seems there are more functions calling qemuDomainObjEnterMonitor that can be called from qemuProcessStart, for example qemuDomainChangeGraphicsPasswords.
Yes, I was fairly sure there would be other cases; I just fixed all the ones that actually occurred during my tests.
In fact it seems like for the cases I'm looking at here, where it's the async job owner thread that's using the EnterMonitor functions, passing asyncJob around is a waste of time anyway because we know the correct value of asyncJob to use: it's stored in priv->job.asyncJob.
Why not have qemuDomainObjEnterMonitorInternal() automatically switch to creating a nested job in this case?
It seems easy to do and would simplify some code as well; what do you think?
We've had it that way before - it didn't work that well: http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=193cd0f3
Interesting. I'll stick to the simpler fix for now then.
@@ -2505,7 +2506,7 @@ qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
     if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
         return 0;

-    qemuDomainObjEnterMonitor(driver, vm);
+    ignore_value(qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob));
Also, the return value of this call could be dangerous to ignore if asyncJob != NONE.
True, but the patch hasn't introduced this, and the full story is even worse ;-)
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
                               virDomainObjPtr obj)
{
    ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
                                                   QEMU_ASYNC_JOB_NONE));
}
qemuDomainObjEnterMonitorInternal is called with QEMU_ASYNC_JOB_NONE here. It always returns 0 in that case and it's safe to ignore. The problem is when you use other async jobs:
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
                                  virDomainObjPtr obj,
                                  qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
        int ret;
        if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
            return ret;
        if (!virDomainObjIsActive(obj)) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("domain is no longer running"));
            /* Still referenced by the containing async job. */
            ignore_value(qemuDomainObjEndJob(driver, obj));
            return -1;
        }
    }
    ...
Ah thanks, I see what you mean. I'll post an updated version of my patch that handles error results and I'll include the qemuDomainChangeGraphicsPasswords() (via qemuProcessInitPasswords()) function you mentioned above as well.

I'd be happy to update other functions as well, but I can't see a simple way of finding every way that qemuProcessStart() might end up calling qemuDomainObjEnterMonitorInternal(). If you can recommend one I'll handle them as well, but otherwise would you accept the patch with only the known ones fixed? (It would at least be an improvement and would make it easy to fix the other cases as they are found.)
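Something along these lines for the qemuDomainUpdateDeviceList() hunk quoted above (just a sketch of the checked-return pattern, not the final patch):

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
        return 0;

    /* Propagate a failure to enter the monitor (e.g. the nested job could
     * not be started or the domain went away) instead of ignoring it. */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;
    if (qemuMonitorGetDeviceAliases(priv->mon, &aliases) < 0) {
        qemuDomainObjExitMonitor(driver, vm);
        return -1;
    }
    ...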
Jan
Cheers, Sam.