[libvirt] [PATCH 0/7] Simplify qemuMigrationFinish

While hacking qemuMigrationFinish I found it pretty hard to follow and revealed few bugs (patches 3 to 5) in the code. Jiri Denemark (7): qemu: Split qemuMigrationFinish qemu: Simplify qemuMigrationFinish qemu: Don't fail migration on save status failure qemu: Kill domain when migration finish fails qemu: Don't report false errors in migration protocol v2 qemuDomainEventQueue: Check if event is non-NULL qemu: Queue events in migration Finish phase ASAP src/qemu/qemu_blockjob.c | 6 +- src/qemu/qemu_cgroup.c | 3 +- src/qemu/qemu_domain.c | 6 +- src/qemu/qemu_driver.c | 87 +++++++------------- src/qemu/qemu_hotplug.c | 26 +++--- src/qemu/qemu_migration.c | 199 ++++++++++++++++++++++------------------------ src/qemu/qemu_process.c | 72 ++++++----------- 7 files changed, 167 insertions(+), 232 deletions(-) -- 2.4.5

Separate code which makes incoming domain persistent into qemuMigrationPersist. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 75 ++++++++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 27 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 97901c6..6b06e79 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5526,6 +5526,51 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def) } +static int +qemuMigrationPersist(virQEMUDriverPtr driver, + virDomainObjPtr vm, + qemuMigrationCookiePtr mig) +{ + virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); + virCapsPtr caps = NULL; + virDomainDefPtr vmdef; + virObjectEventPtr event; + bool newVM; + int ret = -1; + + if (!(caps = virQEMUDriverGetCapabilities(driver, false))) + goto cleanup; + + newVM = !vm->persistent; + vm->persistent = 1; + + if (mig->persistent) + vm->newDef = mig->persistent; + + vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm); + if (!vmdef) + goto cleanup; + + if (virDomainSaveConfig(cfg->configDir, vmdef) < 0) + goto cleanup; + + event = virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_DEFINED, + newVM ? 
+ VIR_DOMAIN_EVENT_DEFINED_ADDED : + VIR_DOMAIN_EVENT_DEFINED_UPDATED); + if (event) + qemuDomainEventQueue(driver, event); + + ret = 0; + + cleanup: + virObjectUnref(caps); + virObjectUnref(cfg); + return ret; +} + + virDomainPtr qemuMigrationFinish(virQEMUDriverPtr driver, virConnectPtr dconn, @@ -5540,13 +5585,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver, { virDomainPtr dom = NULL; virObjectEventPtr event = NULL; - bool newVM = true; qemuMigrationCookiePtr mig = NULL; virErrorPtr orig_err = NULL; int cookie_flags = 0; qemuDomainObjPrivatePtr priv = vm->privateData; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); - virCapsPtr caps = NULL; unsigned short port; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " @@ -5557,9 +5600,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver, port = priv->migrationPort; priv->migrationPort = 0; - if (!(caps = virQEMUDriverGetCapabilities(driver, false))) - goto cleanup; - if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) { qemuMigrationErrorReport(driver, vm->def->name); goto cleanup; @@ -5622,15 +5662,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver, goto endjob; if (flags & VIR_MIGRATE_PERSIST_DEST) { - virDomainDefPtr vmdef; - if (vm->persistent) - newVM = false; - vm->persistent = 1; - if (mig->persistent) - vm->newDef = vmdef = mig->persistent; - else - vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm); - if (!vmdef || virDomainSaveConfig(cfg->configDir, vmdef) < 0) { + bool newVM = !vm->persistent; + + if (qemuMigrationPersist(driver, vm, mig) < 0) { /* Hmpf. Migration was successful, but making it persistent * was not. If we report successful, then when this domain * shuts down, management tools are in for a surprise. 
On the @@ -5654,20 +5688,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver, if (newVM) vm->persistent = 0; } - if (!vmdef) - virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("can't get vmdef")); goto endjob; } - - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_DEFINED, - newVM ? - VIR_DOMAIN_EVENT_DEFINED_ADDED : - VIR_DOMAIN_EVENT_DEFINED_UPDATED); - if (event) - qemuDomainEventQueue(driver, event); - event = NULL; } if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { @@ -5775,7 +5797,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver, virSetError(orig_err); virFreeError(orig_err); } - virObjectUnref(caps); virObjectUnref(cfg); /* Set a special error if Finish is expected to return NULL as a result of -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:00 +0200, Jiri Denemark wrote:
Separate code which makes incoming domain persistent into qemuMigrationPersist.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 75 ++++++++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 27 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 97901c6..6b06e79 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5526,6 +5526,51 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def) }
+static int +qemuMigrationPersist(virQEMUDriverPtr driver, + virDomainObjPtr vm, + qemuMigrationCookiePtr mig) +{ + virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); + virCapsPtr caps = NULL; + virDomainDefPtr vmdef; + virObjectEventPtr event; + bool newVM; + int ret = -1; + + if (!(caps = virQEMUDriverGetCapabilities(driver, false))) + goto cleanup; + + newVM = !vm->persistent; + vm->persistent = 1; + + if (mig->persistent) + vm->newDef = mig->persistent;
This could be done unconditionally since vm->newDef will apparently be unset.
+ + vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm); + if (!vmdef) + goto cleanup; + + if (virDomainSaveConfig(cfg->configDir, vmdef) < 0) + goto cleanup; + + event = virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_DEFINED, + newVM ? + VIR_DOMAIN_EVENT_DEFINED_ADDED : + VIR_DOMAIN_EVENT_DEFINED_UPDATED); + if (event) + qemuDomainEventQueue(driver, event); + + ret = 0; + + cleanup: + virObjectUnref(caps); + virObjectUnref(cfg); + return ret; +} + + virDomainPtr qemuMigrationFinish(virQEMUDriverPtr driver, virConnectPtr dconn,
ACK as-is. Peter

Offline migration migration is quite special because we don't really need to do anything but make the domain persistent. Let's do it separately from normal migration to avoid cluttering the code with !(flags & VIR_MIGRATE_OFFLINE). Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 72 +++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 6b06e79..6a3e2e6 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5625,8 +5625,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver, /* Did the migration go as planned? If yes, return the domain * object, but if no, clean up the empty qemu process. */ - if (retcode == 0) { - if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (retcode != 0 || + qemuMigrationPersist(driver, vm, mig) < 0) + goto endjob; + + dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); + } else if (retcode == 0) { + if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); qemuMigrationErrorReport(driver, vm->def->name); @@ -5643,20 +5649,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver, } } - if (!(flags & VIR_MIGRATE_OFFLINE)) { - if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_FAILED); - goto endjob; - } - if (mig->network) - if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0) - VIR_WARN("unable to provide network data for relocation"); + if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) { + qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, + VIR_QEMU_PROCESS_STOP_MIGRATED); + virDomainAuditStop(vm, "failed"); + event = 
virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_STOPPED, + VIR_DOMAIN_EVENT_STOPPED_FAILED); + goto endjob; } + if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0) + VIR_WARN("unable to provide network data for relocation"); if (qemuMigrationStopNBDServer(driver, vm, mig) < 0) goto endjob; @@ -5680,11 +5683,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver, * to restart during confirm() step, so we kill it off now. */ if (v3proto) { - if (!(flags & VIR_MIGRATE_OFFLINE)) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); - } + qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, + VIR_QEMU_PROCESS_STOP_MIGRATED); + virDomainAuditStop(vm, "failed"); if (newVM) vm->persistent = 0; } @@ -5692,7 +5693,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, } } - if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) { + if (!(flags & VIR_MIGRATE_PAUSED)) { /* run 'cont' on the destination, which allows migration on qemu * >= 0.10.6 to work properly. 
This isn't strictly necessary on * older qemu's, but it also doesn't hurt anything there @@ -5734,19 +5735,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver, dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); - if (!(flags & VIR_MIGRATE_OFFLINE)) { + event = virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_RESUMED, + VIR_DOMAIN_EVENT_RESUMED_MIGRATED); + if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { + virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, + VIR_DOMAIN_PAUSED_USER); + if (event) + qemuDomainEventQueue(driver, event); event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_RESUMED, - VIR_DOMAIN_EVENT_RESUMED_MIGRATED); - if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { - virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, - VIR_DOMAIN_PAUSED_USER); - if (event) - qemuDomainEventQueue(driver, event); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_SUSPENDED, - VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); - } + VIR_DOMAIN_EVENT_SUSPENDED, + VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); } if (virDomainObjIsActive(vm) && @@ -5757,7 +5756,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, /* Guest is successfully running, so cancel previous auto destroy */ qemuProcessAutoDestroyRemove(driver, vm); - } else if (!(flags & VIR_MIGRATE_OFFLINE)) { + } else { qemuDomainJobInfo info; /* Check for a possible error on the monitor in case Finish was called @@ -5775,11 +5774,12 @@ qemuMigrationFinish(virQEMUDriverPtr driver, VIR_DOMAIN_EVENT_STOPPED_FAILED); } - if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + endjob: + if (dom && + qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, QEMU_MIGRATION_COOKIE_STATS) < 0) VIR_WARN("Unable to encode migration cookie"); - endjob: qemuMigrationJobFinish(driver, vm); if (!vm->persistent && !virDomainObjIsActive(vm)) qemuDomainRemoveInactive(driver, vm); -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:01 +0200, Jiri Denemark wrote:
Offline migration is quite special because we don't really need to do anything but make the domain persistent. Let's do it separately from normal migration to avoid cluttering the code with !(flags & VIR_MIGRATE_OFFLINE).
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 72 +++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 6b06e79..6a3e2e6 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5625,8 +5625,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver, /* Did the migration go as planned? If yes, return the domain * object, but if no, clean up the empty qemu process. */ - if (retcode == 0) { - if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) { + if (flags & VIR_MIGRATE_OFFLINE) { + if (retcode != 0 || + qemuMigrationPersist(driver, vm, mig) < 0) + goto endjob;
This isn't entirely equivalent to the previous impl, since if qemuMigrationPersist fails at a later stage the vm->persistent flag does get set. The code below cleared it in some cases, but in this case it would not get cleared. Either qemuMigrationPersist should get updated so that the flag is set only when everything went well, or you should clear it afterwards. Later on in the endjob section there is a check if the object is active so you'd get a persistent VM object with no definition in some cases.
+ + dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); + } else if (retcode == 0) { + if (!virDomainObjIsActive(vm)) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("guest unexpectedly quit")); qemuMigrationErrorReport(driver, vm->def->name);
ACK if you fix the offline migration error handling. Peter

When we save status XML at the point during migration where we have already started the domain on destination, we can't really go back and abort migration. Thus the only thing we can do is to log a warning and report success. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 6a3e2e6..9439954 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -3794,10 +3794,8 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver, VIR_DOMAIN_EVENT_RESUMED_MIGRATED); } - if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) { + if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto cleanup; - } } done: @@ -5749,10 +5747,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver, } if (virDomainObjIsActive(vm) && - virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) { + virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) VIR_WARN("Failed to save status on vm %s", vm->def->name); - goto endjob; - } /* Guest is successfully running, so cancel previous auto destroy */ qemuProcessAutoDestroyRemove(driver, vm); -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:02 +0200, Jiri Denemark wrote:
When we save status XML at the point during migration where we have already started the domain on destination, we can't really go back and abort migration. Thus the only thing we can do is to log a warning and report success.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-)
ACK, Peter

Whenever something fails during incoming migration in Finish phase before we started guest CPUs, we need to kill the domain in addition to reporting the failure. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 9439954..576b32d 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5589,6 +5589,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); unsigned short port; + bool keep = false; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -5647,15 +5648,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver, } } - if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_FAILED); + if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) goto endjob; - } + if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0) VIR_WARN("unable to provide network data for relocation"); @@ -5681,11 +5676,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver, * to restart during confirm() step, so we kill it off now. 
*/ if (v3proto) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); if (newVM) vm->persistent = 0; + } else { + keep = true; } goto endjob; } @@ -5715,14 +5709,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver, * target in paused state, in case admin can fix * things up */ - if (v3proto) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_FAILED); - } + if (!v3proto) + keep = true; goto endjob; } if (priv->job.completed) { @@ -5761,7 +5749,12 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, &info); + } + endjob: + if (!dom && !keep && + !(flags & VIR_MIGRATE_OFFLINE) && + virDomainObjIsActive(vm)) { qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, VIR_QEMU_PROCESS_STOP_MIGRATED); virDomainAuditStop(vm, "failed"); @@ -5770,7 +5763,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver, VIR_DOMAIN_EVENT_STOPPED_FAILED); } - endjob: if (dom && qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, QEMU_MIGRATION_COOKIE_STATS) < 0) -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:03 +0200, Jiri Denemark wrote:
Whenever something fails during incoming migration in Finish phase before we started guest CPUs, we need to kill the domain in addition to reporting the failure.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 9439954..576b32d 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5589,6 +5589,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); unsigned short port; + bool keep = false;
VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -5647,15 +5648,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver, } }
- if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_FAILED); + if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) goto endjob; - } + if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0) VIR_WARN("unable to provide network data for relocation");
@@ -5681,11 +5676,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver, * to restart during confirm() step, so we kill it off now. */ if (v3proto) { - qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, - VIR_QEMU_PROCESS_STOP_MIGRATED); - virDomainAuditStop(vm, "failed"); if (newVM) vm->persistent = 0;
If you choose to fix qemuMigrationPersist so that it sets the persistent flag only on succes to fix the previous patch then this will create a conflict since the above statement shouldn't be necessary.
+ } else { + keep = true; } goto endjob; }
ACK, Peter

Finish is the final state in v2 of our migration protocol. If something fails, we have no option to abort the migration and resume the original domain. Non fatal errors (such as failure to start guest CPUs or make the domain persistent) has to be treated as success. Keeping the domain running while reporting the failure was just asking for troubles. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 576b32d..bca5ad1 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5589,7 +5589,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuDomainObjPrivatePtr priv = vm->privateData; virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver); unsigned short port; - bool keep = false; VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, " "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d", @@ -5667,21 +5666,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver, * other hand, if we report failure, then the management tools * might try to restart the domain on the source side, even * though the domain is actually running on the destination. - * Return a NULL dom pointer, and hope that this is a rare - * situation and management tools are smart. - */ - - /* + * Pretend success and hope that this is a rare situation and + * management tools are smart. + * * However, in v3 protocol, the source VM is still available * to restart during confirm() step, so we kill it off now. 
*/ if (v3proto) { if (newVM) vm->persistent = 0; - } else { - keep = true; + goto endjob; } - goto endjob; } } @@ -5709,9 +5704,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver, * target in paused state, in case admin can fix * things up */ - if (!v3proto) - keep = true; - goto endjob; + if (v3proto) + goto endjob; } if (priv->job.completed) { qemuDomainJobInfoUpdateTime(priv->job.completed); @@ -5752,7 +5746,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, } endjob: - if (!dom && !keep && + if (!dom && !(flags & VIR_MIGRATE_OFFLINE) && virDomainObjIsActive(vm)) { qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:04 +0200, Jiri Denemark wrote:
Finish is the final state in v2 of our migration protocol. If something fails, we have no option to abort the migration and resume the original domain. Non fatal errors (such as failure to start guest CPUs or make the domain persistent) has to be treated as success. Keeping the domain running while reporting the failure was just asking for troubles.
s/es/e/
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-)
ACK, Peter

Every single call to qemuDomainEventQueue() uses the following pattern: if (event) qemuDomainEventQueue(driver, event); Let's move the check for valid event to qemuDomainEventQueue and simplify all callers. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_blockjob.c | 6 ++-- src/qemu/qemu_cgroup.c | 3 +- src/qemu/qemu_domain.c | 6 ++-- src/qemu/qemu_driver.c | 87 ++++++++++++++++------------------------------- src/qemu/qemu_hotplug.c | 26 ++++++-------- src/qemu/qemu_migration.c | 24 +++++-------- src/qemu/qemu_process.c | 72 +++++++++++++-------------------------- 7 files changed, 78 insertions(+), 146 deletions(-) diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c index 8849850..1d5b7ce 100644 --- a/src/qemu/qemu_blockjob.c +++ b/src/qemu/qemu_blockjob.c @@ -194,10 +194,8 @@ qemuBlockJobEventProcess(virQEMUDriverPtr driver, "after block job", vm->def->name); } - if (event) - qemuDomainEventQueue(driver, event); - if (event2) - qemuDomainEventQueue(driver, event2); + qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event2); virObjectUnref(cfg); } diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c index 8ed74ee..733a899 100644 --- a/src/qemu/qemu_cgroup.c +++ b/src/qemu/qemu_cgroup.c @@ -696,8 +696,7 @@ qemuSetupCpuCgroup(virQEMUDriverPtr driver, event = virDomainEventTunableNewFromObj(vm, eventParams, eventNparams); } - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } return 0; diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c index 8b050a0..6b500c1 100644 --- a/src/qemu/qemu_domain.c +++ b/src/qemu/qemu_domain.c @@ -126,7 +126,8 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, void qemuDomainEventQueue(virQEMUDriverPtr driver, virObjectEventPtr event) { - virObjectEventStateQueue(driver->domainEventState, event); + if (event) + virObjectEventStateQueue(driver->domainEventState, event); } @@ -2652,8 +2653,7 @@ 
qemuDomainCheckRemoveOptionalDisk(virQEMUDriverPtr driver, virDomainDiskDefFree(disk); } - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); return 0; } diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index ea4e545..c59834a 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -976,8 +976,7 @@ static void qemuNotifyLoadDomain(virDomainObjPtr vm, int newVM, void *opaque) virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_DEFINED, VIR_DOMAIN_EVENT_DEFINED_ADDED); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } } @@ -1791,8 +1790,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn, virDomainObjEndAPI(&vm); if (event) { qemuDomainEventQueue(driver, event); - if (event2) - qemuDomainEventQueue(driver, event2); + qemuDomainEventQueue(driver, event2); } virObjectUnref(caps); virObjectUnref(qemuCaps); @@ -1873,8 +1871,7 @@ static int qemuDomainSuspend(virDomainPtr dom) cleanup: virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return ret; } @@ -1933,8 +1930,7 @@ static int qemuDomainResume(virDomainPtr dom) cleanup: virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return ret; } @@ -2251,8 +2247,7 @@ qemuDomainDestroyFlags(virDomainPtr dom, cleanup: virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); return ret; } @@ -3275,8 +3270,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom, cleanup: VIR_FREE(xml); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(caps); return ret; } @@ -3777,8 +3771,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom, cleanup: virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + 
qemuDomainEventQueue(driver, event); return ret; } @@ -4016,8 +4009,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, VIR_DOMAIN_EVENT_CRASHED, VIR_DOMAIN_EVENT_CRASHED_PANICKED); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0) VIR_WARN("Unable to release lease on %s", vm->def->name); @@ -4050,8 +4042,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_CRASHED); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virDomainAuditStop(vm, "destroyed"); @@ -4494,9 +4485,9 @@ processSerialChangedEvent(virQEMUDriverPtr driver, } } - if ((event = virDomainEventAgentLifecycleNewFromObj(vm, newstate, - VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_CHANNEL))) - qemuDomainEventQueue(driver, event); + event = virDomainEventAgentLifecycleNewFromObj(vm, newstate, + VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_CHANNEL); + qemuDomainEventQueue(driver, event); } dev.data.chr->state = newstate; @@ -5194,8 +5185,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom, if (cgroup_vcpu) virCgroupFree(&cgroup_vcpu); virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); VIR_FREE(str); virBitmapFree(pcpumap); virObjectUnref(cfg); @@ -5391,8 +5381,7 @@ qemuDomainPinEmulator(virDomainPtr dom, cleanup: if (cgroup_emulator) virCgroupFree(&cgroup_emulator); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); VIR_FREE(str); virBitmapFree(pcpumap); virDomainObjEndAPI(&vm); @@ -5886,8 +5875,7 @@ qemuDomainPinIOThread(virDomainPtr dom, cleanup: if (cgroup_iothread) virCgroupFree(&cgroup_iothread); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); VIR_FREE(str); virBitmapFree(pcpumap); virDomainObjEndAPI(&vm); @@ -6689,8 +6677,7 @@ 
qemuDomainSaveImageStartVM(virConnectPtr conn, VIR_DOMAIN_EVENT_STARTED, VIR_DOMAIN_EVENT_STARTED_RESTORED); virDomainAuditStart(vm, "restored", true); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); /* If it was running before, resume it now unless caller requested pause. */ @@ -6713,8 +6700,7 @@ qemuDomainSaveImageStartVM(virConnectPtr conn, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, detail); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } ret = 0; @@ -7382,8 +7368,7 @@ qemuDomainObjStart(virConnectPtr conn, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } } } @@ -7532,8 +7517,7 @@ static virDomainPtr qemuDomainDefineXMLFlags(virConnectPtr conn, const char *xml virDomainDefFree(oldDef); virDomainDefFree(def); virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(qemuCaps); virObjectUnref(caps); virObjectUnref(cfg); @@ -7650,8 +7634,7 @@ qemuDomainUndefineFlags(virDomainPtr dom, cleanup: VIR_FREE(name); virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return ret; } @@ -7799,8 +7782,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm, * is in monitor */ virObjectEventPtr event; event = virDomainEventDeviceAddedNewFromObj(vm, alias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } if (ret == 0) @@ -10466,8 +10448,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom, if (eventNparams) { event = virDomainEventTunableNewFromDom(dom, eventParams, eventNparams); eventNparams = 0; - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } if (flags & 
VIR_DOMAIN_AFFECT_CONFIG) { @@ -13677,8 +13658,7 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn, } } - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); return ret; } @@ -14499,8 +14479,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn, virDomainAuditStop(vm, "from-snapshot"); resume = false; thaw = 0; - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } else if (memory && pmsuspended) { /* qemu 1.3 is unable to save a domain in pm-suspended (S3) * state; so we must emit an event stating that it was @@ -14509,8 +14488,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn, VIR_DOMAIN_PAUSED_FROM_SNAPSHOT); event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } ret = 0; @@ -14523,8 +14501,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); if (virGetLastError() == NULL) { virReportError(VIR_ERR_OPERATION_FAILED, "%s", _("resuming after snapshot failed")); @@ -15363,8 +15340,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, detail); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); goto load; } @@ -15502,8 +15478,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, start_flags |= paused ? 
VIR_QEMU_PROCESS_START_PAUSED : 0; - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); rc = qemuProcessStart(snapshot->domain->conn, driver, vm, QEMU_ASYNC_JOB_NONE, NULL, -1, NULL, NULL, VIR_NETDEV_VPORT_PROFILE_OP_CREATE, @@ -15563,8 +15538,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot, } if (event) { qemuDomainEventQueue(driver, event); - if (event2) - qemuDomainEventQueue(driver, event2); + qemuDomainEventQueue(driver, event2); } virDomainObjEndAPI(&vm); virObjectUnref(caps); @@ -17635,8 +17609,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom, if (eventNparams) { event = virDomainEventTunableNewFromDom(dom, eventParams, eventNparams); eventNparams = 0; - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } } diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c index 79338cf..5436be9 100644 --- a/src/qemu/qemu_hotplug.c +++ b/src/qemu/qemu_hotplug.c @@ -1809,8 +1809,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver, } event = virDomainEventDeviceAddedNewFromObj(vm, objalias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); /* fix the balloon size if it was set to maximum */ if (fix_balloon) @@ -2843,8 +2842,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver, virDomainAuditDisk(vm, disk->src, NULL, "detach", true); event = virDomainEventDeviceRemovedNewFromObj(vm, disk->info.alias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); for (i = 0; i < vm->def->ndisks; i++) { if (vm->def->disks[i] == disk) { @@ -2886,8 +2884,7 @@ qemuDomainRemoveControllerDevice(virQEMUDriverPtr driver, controller->info.alias, vm, vm->def->name); event = virDomainEventDeviceRemovedNewFromObj(vm, controller->info.alias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); for (i = 0; i < vm->def->ncontrollers; i++) { if (vm->def->controllers[i] == controller) 
{ @@ -2918,8 +2915,8 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver, VIR_DEBUG("Removing memory device %s from domain %p %s", mem->info.alias, vm, vm->def->name); - if ((event = virDomainEventDeviceRemovedNewFromObj(vm, mem->info.alias))) - qemuDomainEventQueue(driver, event); + event = virDomainEventDeviceRemovedNewFromObj(vm, mem->info.alias); + qemuDomainEventQueue(driver, event); if (virAsprintf(&backendAlias, "mem%s", mem->info.alias) < 0) goto cleanup; @@ -3002,8 +2999,7 @@ qemuDomainRemoveHostDevice(virQEMUDriverPtr driver, } event = virDomainEventDeviceRemovedNewFromObj(vm, hostdev->info->alias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); if (hostdev->parent.type == VIR_DOMAIN_DEVICE_NET) { net = hostdev->parent.data.net; @@ -3117,8 +3113,7 @@ qemuDomainRemoveNetDevice(virQEMUDriverPtr driver, virDomainAuditNet(vm, net, NULL, "detach", true); event = virDomainEventDeviceRemovedNewFromObj(vm, net->info.alias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); for (i = 0; i < vm->def->nnets; i++) { if (vm->def->nets[i] == net) { @@ -3195,8 +3190,7 @@ qemuDomainRemoveChrDevice(virQEMUDriverPtr driver, goto cleanup; event = virDomainEventDeviceRemovedNewFromObj(vm, chr->info.alias); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); qemuDomainChrRemove(vm->def, chr); virDomainChrDefFree(chr); @@ -3244,8 +3238,8 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver, if (rc < 0) goto cleanup; - if ((event = virDomainEventDeviceRemovedNewFromObj(vm, rng->info.alias))) - qemuDomainEventQueue(driver, event); + event = virDomainEventDeviceRemovedNewFromObj(vm, rng->info.alias); + qemuDomainEventQueue(driver, event); if ((idx = virDomainRNGFind(vm->def, rng)) >= 0) virDomainRNGRemove(vm->def, idx); diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index bca5ad1..ae7433e 100644 --- a/src/qemu/qemu_migration.c +++ 
b/src/qemu/qemu_migration.c @@ -2289,8 +2289,7 @@ qemuMigrationSetOffline(virQEMUDriverPtr driver, event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } return ret; @@ -3454,8 +3453,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver, qemuDomainRemoveInactive(driver, vm); } virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); qemuMigrationCookieFree(mig); virObjectUnref(caps); virNWFilterUnlockFilterUpdates(); @@ -3803,8 +3801,7 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver, rv = 0; cleanup: - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return rv; } @@ -5330,8 +5327,7 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver, cleanup: virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return ret; } @@ -5399,8 +5395,7 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver, cleanup: virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); return ret; } @@ -5557,8 +5552,7 @@ qemuMigrationPersist(virQEMUDriverPtr driver, newVM ? 
VIR_DOMAIN_EVENT_DEFINED_ADDED : VIR_DOMAIN_EVENT_DEFINED_UPDATED); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); ret = 0; @@ -5721,8 +5715,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED, VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); @@ -5772,8 +5765,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuMonitorSetDomainLog(priv->mon, -1); VIR_FREE(priv->origname); virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); qemuMigrationCookieFree(mig); if (orig_err) { virSetError(orig_err); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 14d3db6..3f7a9dc 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -329,8 +329,7 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED, virObjectUnlock(vm); cleanup: - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); } @@ -354,8 +353,7 @@ qemuProcessHandleMonitorError(qemuMonitorPtr mon ATTRIBUTE_UNUSED, ((qemuDomainObjPrivatePtr) vm->privateData)->monError = true; event = virDomainEventControlErrorNewFromObj(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnlock(vm); } @@ -518,8 +516,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon ATTRIBUTE_UNUSED, virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; @@ -595,8 +592,7 @@ qemuProcessFakeReboot(void *opaque) if (ret == -1) ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE)); virDomainObjEndAPI(&vm); - if (event) - qemuDomainEventQueue(driver, event); + 
qemuDomainEventQueue(driver, event); virObjectUnref(cfg); } @@ -645,8 +641,7 @@ qemuProcessHandleEvent(qemuMonitorPtr mon ATTRIBUTE_UNUSED, seconds, micros, details); virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); return 0; } @@ -699,8 +694,7 @@ qemuProcessHandleShutdown(qemuMonitorPtr mon ATTRIBUTE_UNUSED, unlock: virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; @@ -749,8 +743,7 @@ qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED, unlock: virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; @@ -803,8 +796,7 @@ qemuProcessHandleResume(qemuMonitorPtr mon ATTRIBUTE_UNUSED, unlock: virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; } @@ -848,8 +840,7 @@ qemuProcessHandleRTCChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED, virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; } @@ -910,10 +901,8 @@ qemuProcessHandleWatchdog(qemuMonitorPtr mon ATTRIBUTE_UNUSED, if (vm) virObjectUnlock(vm); - if (watchdogEvent) - qemuDomainEventQueue(driver, watchdogEvent); - if (lifecycleEvent) - qemuDomainEventQueue(driver, lifecycleEvent); + qemuDomainEventQueue(driver, watchdogEvent); + qemuDomainEventQueue(driver, lifecycleEvent); virObjectUnref(cfg); return 0; @@ -974,12 +963,9 @@ qemuProcessHandleIOError(qemuMonitorPtr mon ATTRIBUTE_UNUSED, } virObjectUnlock(vm); - if (ioErrorEvent) - qemuDomainEventQueue(driver, ioErrorEvent); - if (ioErrorEvent2) - qemuDomainEventQueue(driver, ioErrorEvent2); - if (lifecycleEvent) - qemuDomainEventQueue(driver, lifecycleEvent); + qemuDomainEventQueue(driver, ioErrorEvent); + qemuDomainEventQueue(driver, ioErrorEvent2); + 
qemuDomainEventQueue(driver, lifecycleEvent); virObjectUnref(cfg); return 0; } @@ -1102,8 +1088,7 @@ qemuProcessHandleGraphics(qemuMonitorPtr mon ATTRIBUTE_UNUSED, event = virDomainEventGraphicsNewFromObj(vm, phase, localAddr, remoteAddr, authScheme, subject); virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); return 0; @@ -1162,8 +1147,7 @@ qemuProcessHandleTrayChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED, } virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; } @@ -1201,10 +1185,8 @@ qemuProcessHandlePMWakeup(qemuMonitorPtr mon ATTRIBUTE_UNUSED, } virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); - if (lifecycleEvent) - qemuDomainEventQueue(driver, lifecycleEvent); + qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, lifecycleEvent); virObjectUnref(cfg); return 0; } @@ -1245,10 +1227,8 @@ qemuProcessHandlePMSuspend(qemuMonitorPtr mon ATTRIBUTE_UNUSED, virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); - if (lifecycleEvent) - qemuDomainEventQueue(driver, lifecycleEvent); + qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, lifecycleEvent); virObjectUnref(cfg); return 0; } @@ -1275,8 +1255,7 @@ qemuProcessHandleBalloonChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED, virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); virObjectUnref(cfg); return 0; } @@ -1317,10 +1296,8 @@ qemuProcessHandlePMSuspendDisk(qemuMonitorPtr mon ATTRIBUTE_UNUSED, virObjectUnlock(vm); - if (event) - qemuDomainEventQueue(driver, event); - if (lifecycleEvent) - qemuDomainEventQueue(driver, lifecycleEvent); + qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, lifecycleEvent); virObjectUnref(cfg); return 0; @@ -5696,8 +5673,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom, dom = NULL; } - if (event) - 
qemuDomainEventQueue(driver, event); + qemuDomainEventQueue(driver, event); cleanup: return dom; -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:05 +0200, Jiri Denemark wrote:
Every single call to qemuDomainEventQueue() uses the following pattern:
if (event) qemuDomainEventQueue(driver, event);
Let's move the check for valid event to qemuDomainEventQueue and simplify all callers.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_blockjob.c | 6 ++-- src/qemu/qemu_cgroup.c | 3 +- src/qemu/qemu_domain.c | 6 ++-- src/qemu/qemu_driver.c | 87 ++++++++++++++++------------------------------- src/qemu/qemu_hotplug.c | 26 ++++++-------- src/qemu/qemu_migration.c | 24 +++++-------- src/qemu/qemu_process.c | 72 +++++++++++++-------------------------- 7 files changed, 78 insertions(+), 146 deletions(-)
Yay \o/, finally this is taken care of. ACK Peter

For quite a long time we don't need to postpone queueing events until the end of the function since we no longer have the big driver lock. Let's make the code of qemuMigrationFinish simpler by queuing events at the time we generate them. Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index ae7433e..cc7754c 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -5576,7 +5576,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver, bool v3proto) { virDomainPtr dom = NULL; - virObjectEventPtr event = NULL; qemuMigrationCookiePtr mig = NULL; virErrorPtr orig_err = NULL; int cookie_flags = 0; @@ -5709,16 +5708,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver, dom = virGetDomain(dconn, vm->def->name, vm->def->uuid); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_RESUMED, - VIR_DOMAIN_EVENT_RESUMED_MIGRATED); + qemuDomainEventQueue(driver, + virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_RESUMED, + VIR_DOMAIN_EVENT_RESUMED_MIGRATED)); if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER); - qemuDomainEventQueue(driver, event); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_SUSPENDED, - VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); + qemuDomainEventQueue(driver, + virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_SUSPENDED, + VIR_DOMAIN_EVENT_SUSPENDED_PAUSED)); } if (virDomainObjIsActive(vm) && @@ -5745,9 +5745,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, VIR_QEMU_PROCESS_STOP_MIGRATED); virDomainAuditStop(vm, "failed"); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_STOPPED, - VIR_DOMAIN_EVENT_STOPPED_FAILED); + qemuDomainEventQueue(driver, + virDomainEventLifecycleNewFromObj(vm, + 
VIR_DOMAIN_EVENT_STOPPED, + VIR_DOMAIN_EVENT_STOPPED_FAILED)); } if (dom && @@ -5765,7 +5766,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver, qemuMonitorSetDomainLog(priv->mon, -1); VIR_FREE(priv->origname); virDomainObjEndAPI(&vm); - qemuDomainEventQueue(driver, event); qemuMigrationCookieFree(mig); if (orig_err) { virSetError(orig_err); -- 2.4.5

On Wed, Jul 08, 2015 at 19:36:06 +0200, Jiri Denemark wrote:
For quite a long time we haven't needed to postpone queueing events until the end of the function, since we no longer hold the big driver lock. Let's make the code of qemuMigrationFinish simpler by queueing events at the time we generate them.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com> --- src/qemu/qemu_migration.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index ae7433e..cc7754c 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c
...
@@ -5709,16 +5708,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
- event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_RESUMED, - VIR_DOMAIN_EVENT_RESUMED_MIGRATED); + qemuDomainEventQueue(driver, + virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_RESUMED, + VIR_DOMAIN_EVENT_RESUMED_MIGRATED));
I dislike this formatting.
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER); - qemuDomainEventQueue(driver, event); - event = virDomainEventLifecycleNewFromObj(vm, - VIR_DOMAIN_EVENT_SUSPENDED, - VIR_DOMAIN_EVENT_SUSPENDED_PAUSED); + qemuDomainEventQueue(driver, + virDomainEventLifecycleNewFromObj(vm, + VIR_DOMAIN_EVENT_SUSPENDED, + VIR_DOMAIN_EVENT_SUSPENDED_PAUSED)); }
if (virDomainObjIsActive(vm) &&
ACK, Peter
participants (2)
-
Jiri Denemark
-
Peter Krempa