[libvirt] [PATCH v2 0/8] Simplify and fix qemuMigrationFinish

While hacking qemuMigrationFinish I found it pretty hard to follow, and
the work revealed a few bugs (patches 3 to 5, and 8) in the code.

Version 2:
- rebased and review comments addressed
- new patch "qemu: Fix some corner cases in persistent migration"

Jiri Denemark (8):
  qemu: Split qemuMigrationFinish
  qemu: Simplify qemuMigrationFinish
  qemu: Don't fail migration on save status failure
  qemu: Kill domain when migration finish fails
  qemu: Don't report false errors in migration protocol v2
  qemuDomainEventQueue: Check if event is non-NULL
  qemu: Queue events in migration Finish phase ASAP
  qemu: Fix some corner cases in persistent migration

 src/qemu/qemu_blockjob.c  |   6 +-
 src/qemu/qemu_cgroup.c    |   3 +-
 src/qemu/qemu_domain.c    |   6 +-
 src/qemu/qemu_driver.c    |  87 +++++++-----------
 src/qemu/qemu_hotplug.c   |  26 +++---
 src/qemu/qemu_migration.c | 223 ++++++++++++++++++++++++----------------------
 src/qemu/qemu_process.c   |  72 +++++----------
 7 files changed, 187 insertions(+), 236 deletions(-)

-- 
2.5.1

Separate the code which makes an incoming domain persistent into
qemuMigrationPersist.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - reset vm->persistent if copying persistent def fails

 src/qemu/qemu_migration.c | 78 +++++++++++++++++++++++++++++------------------
 1 file changed, 49 insertions(+), 29 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 1846239..b9e0c22 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5558,6 +5558,54 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
 }
 
 
+static int
+qemuMigrationPersist(virQEMUDriverPtr driver,
+                     virDomainObjPtr vm,
+                     qemuMigrationCookiePtr mig)
+{
+    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+    virCapsPtr caps = NULL;
+    virDomainDefPtr vmdef;
+    virObjectEventPtr event;
+    bool newVM;
+    int ret = -1;
+
+    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+        goto cleanup;
+
+    newVM = !vm->persistent;
+    vm->persistent = 1;
+
+    if (mig->persistent)
+        vm->newDef = mig->persistent;
+
+    vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
+    if (!vmdef) {
+        if (newVM)
+            vm->persistent = 0;
+        goto cleanup;
+    }
+
+    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0)
+        goto cleanup;
+
+    event = virDomainEventLifecycleNewFromObj(vm,
+                                              VIR_DOMAIN_EVENT_DEFINED,
+                                              newVM ?
+                                              VIR_DOMAIN_EVENT_DEFINED_ADDED :
+                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED);
+    if (event)
+        qemuDomainEventQueue(driver, event);
+
+    ret = 0;
+
+ cleanup:
+    virObjectUnref(caps);
+    virObjectUnref(cfg);
+    return ret;
+}
+
+
 virDomainPtr
 qemuMigrationFinish(virQEMUDriverPtr driver,
                     virConnectPtr dconn,
@@ -5572,13 +5620,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
 {
     virDomainPtr dom = NULL;
     virObjectEventPtr event = NULL;
-    bool newVM = true;
     qemuMigrationCookiePtr mig = NULL;
     virErrorPtr orig_err = NULL;
     int cookie_flags = 0;
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
-    virCapsPtr caps = NULL;
     unsigned short port;
 
     VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
@@ -5589,9 +5635,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     port = priv->migrationPort;
     priv->migrationPort = 0;
 
-    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
-        goto cleanup;
-
     if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
         qemuMigrationErrorReport(driver, vm->def->name);
         goto cleanup;
@@ -5654,15 +5697,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             goto endjob;
 
         if (flags & VIR_MIGRATE_PERSIST_DEST) {
-            virDomainDefPtr vmdef;
-            if (vm->persistent)
-                newVM = false;
-            vm->persistent = 1;
-            if (mig->persistent)
-                vm->newDef = vmdef = mig->persistent;
-            else
-                vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
-            if (!vmdef || virDomainSaveConfig(cfg->configDir, vmdef) < 0) {
+            if (qemuMigrationPersist(driver, vm, mig) < 0) {
                 /* Hmpf.  Migration was successful, but making it persistent
                  * was not.  If we report successful, then when this domain
                  * shuts down, management tools are in for a surprise.  On the
@@ -5683,23 +5718,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
                                     VIR_QEMU_PROCESS_STOP_MIGRATED);
                     virDomainAuditStop(vm, "failed");
                 }
-                if (newVM)
-                    vm->persistent = 0;
             }
-            if (!vmdef)
-                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-                               _("can't get vmdef"));
             goto endjob;
         }
-
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_DEFINED,
-                                                  newVM ?
-                                                  VIR_DOMAIN_EVENT_DEFINED_ADDED :
-                                                  VIR_DOMAIN_EVENT_DEFINED_UPDATED);
-        if (event)
-            qemuDomainEventQueue(driver, event);
-        event = NULL;
     }
 
     if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
@@ -5807,7 +5828,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
         virSetError(orig_err);
         virFreeError(orig_err);
     }
-    virObjectUnref(caps);
     virObjectUnref(cfg);
 
     /* Set a special error if Finish is expected to return NULL as a result of
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Separate the code which makes an incoming domain persistent into
qemuMigrationPersist.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - reset vm->persistent if copying persistent def fails

 src/qemu/qemu_migration.c | 78 +++++++++++++++++++++++++++++------------------
 1 file changed, 49 insertions(+), 29 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 1846239..b9e0c22 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5558,6 +5558,54 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
 }
 
 
+static int
+qemuMigrationPersist(virQEMUDriverPtr driver,
+                     virDomainObjPtr vm,
+                     qemuMigrationCookiePtr mig)
+{
Could have passed cfg - your call though.
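For illustration only, the alternative John is suggesting would thread the
cfg reference the caller (qemuMigrationFinish) already holds through the
signature; this is a hypothetical sketch, not part of the posted patch:

    static int
    qemuMigrationPersist(virQEMUDriverPtr driver,
                         virQEMUDriverConfigPtr cfg,   /* caller's reference */
                         virDomainObjPtr vm,
                         qemuMigrationCookiePtr mig);

The helper could then drop its own virQEMUDriverGetConfig()/virObjectUnref(cfg)
pair, at the cost of one more parameter.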
+    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+    virCapsPtr caps = NULL;
+    virDomainDefPtr vmdef;
+    virObjectEventPtr event;
+    bool newVM;
+    int ret = -1;
+
+    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+        goto cleanup;
+
+    newVM = !vm->persistent;
+    vm->persistent = 1;
+
+    if (mig->persistent)
+        vm->newDef = mig->persistent;
+
+    vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
+    if (!vmdef) {
+        if (newVM)
+            vm->persistent = 0;
+        goto cleanup;
+    }
+
+    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0)
+        goto cleanup;
[1] In the prior code, if virDomainSaveConfig() fails, the
"if (newVM) vm->persistent = 0" rollback is also executed, but that
isn't done here. Whether that's an 'existing' bug wasn't clear...

Seems reasonable otherwise - ACK

John
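For reference, a minimal sketch of what mirroring the old rollback on the
save-failure path might look like inside the new helper (hypothetical; the
posted patch does not do this):

    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0) {
        if (newVM)
            vm->persistent = 0;  /* hypothetical: undo, as the pre-patch code did */
        goto cleanup;
    }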
[...]
         if (flags & VIR_MIGRATE_PERSIST_DEST) {
-            virDomainDefPtr vmdef;
-            if (vm->persistent)
-                newVM = false;
-            vm->persistent = 1;
-            if (mig->persistent)
-                vm->newDef = vmdef = mig->persistent;
-            else
-                vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
-            if (!vmdef || virDomainSaveConfig(cfg->configDir, vmdef) < 0) {
[1] ^^^^
+            if (qemuMigrationPersist(driver, vm, mig) < 0) {
                 /* Hmpf.  Migration was successful, but making it persistent
                  * was not.  If we report successful, then when this domain
                  * shuts down, management tools are in for a surprise.  On the
@@ -5683,23 +5718,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
                                     VIR_QEMU_PROCESS_STOP_MIGRATED);
                     virDomainAuditStop(vm, "failed");
                 }
-                if (newVM)
-                    vm->persistent = 0;
[1] ^^^^
[...]

Offline migration is quite special because we don't really need to do
anything but make the domain persistent. Let's do it separately from
normal migration to avoid cluttering the code with
!(flags & VIR_MIGRATE_OFFLINE).

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1 (context conflict)

 src/qemu/qemu_migration.c | 72 +++++++++++++++++++++++------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index b9e0c22..48a4476 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5660,8 +5660,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     /* Did the migration go as planned?  If yes, return the domain
      * object, but if no, clean up the empty qemu process.
      */
-    if (retcode == 0) {
-        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (retcode != 0 ||
+            qemuMigrationPersist(driver, vm, mig) < 0)
+            goto endjob;
+
+        dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
+    } else if (retcode == 0) {
+        if (!virDomainObjIsActive(vm)) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
             qemuMigrationErrorReport(driver, vm->def->name);
@@ -5678,20 +5684,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             }
         }
 
-        if (!(flags & VIR_MIGRATE_OFFLINE)) {
-            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
-                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                                VIR_QEMU_PROCESS_STOP_MIGRATED);
-                virDomainAuditStop(vm, "failed");
-                event = virDomainEventLifecycleNewFromObj(vm,
-                                                          VIR_DOMAIN_EVENT_STOPPED,
-                                                          VIR_DOMAIN_EVENT_STOPPED_FAILED);
-                goto endjob;
-            }
-            if (mig->network)
-                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
-                    VIR_WARN("unable to provide network data for relocation");
+        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
+            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                            VIR_QEMU_PROCESS_STOP_MIGRATED);
+            virDomainAuditStop(vm, "failed");
+            event = virDomainEventLifecycleNewFromObj(vm,
+                                                      VIR_DOMAIN_EVENT_STOPPED,
+                                                      VIR_DOMAIN_EVENT_STOPPED_FAILED);
+            goto endjob;
         }
+        if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+            VIR_WARN("unable to provide network data for relocation");
 
         if (qemuMigrationStopNBDServer(driver, vm, mig) < 0)
             goto endjob;
@@ -5713,17 +5716,15 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * to restart during confirm() step, so we kill it off now.
              */
             if (v3proto) {
-                if (!(flags & VIR_MIGRATE_OFFLINE)) {
-                    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                                    VIR_QEMU_PROCESS_STOP_MIGRATED);
-                    virDomainAuditStop(vm, "failed");
-                }
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
             }
             goto endjob;
         }
     }
 
-    if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
+    if (!(flags & VIR_MIGRATE_PAUSED)) {
         /* run 'cont' on the destination, which allows migration on qemu
          * >= 0.10.6 to work properly.  This isn't strictly necessary on
          * older qemu's, but it also doesn't hurt anything there
@@ -5765,19 +5766,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
 
     dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
 
-    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+    event = virDomainEventLifecycleNewFromObj(vm,
+                                              VIR_DOMAIN_EVENT_RESUMED,
+                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
+        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
+                             VIR_DOMAIN_PAUSED_USER);
+        if (event)
+            qemuDomainEventQueue(driver, event);
         event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_RESUMED,
-                                                  VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
-            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
-                                 VIR_DOMAIN_PAUSED_USER);
-            if (event)
-                qemuDomainEventQueue(driver, event);
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_SUSPENDED,
-                                                      VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
-        }
+                                                  VIR_DOMAIN_EVENT_SUSPENDED,
+                                                  VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
     }
 
     if (virDomainObjIsActive(vm) &&
@@ -5788,7 +5787,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
 
         /* Guest is successfully running, so cancel previous auto destroy */
         qemuProcessAutoDestroyRemove(driver, vm);
-    } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
+    } else {
         qemuDomainJobInfo info;
 
         /* Check for a possible error on the monitor in case Finish was called
@@ -5806,11 +5805,12 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
                                               VIR_DOMAIN_EVENT_STOPPED_FAILED);
     }
 
-    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
+ endjob:
+    if (dom &&
+        qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                 QEMU_MIGRATION_COOKIE_STATS) < 0)
         VIR_WARN("Unable to encode migration cookie");
 
- endjob:
     qemuMigrationJobFinish(driver, vm);
     if (!vm->persistent && !virDomainObjIsActive(vm))
         qemuDomainRemoveInactive(driver, vm);
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Offline migration is quite special because we don't really need to do
anything but make the domain persistent. Let's do it separately from
normal migration to avoid cluttering the code with
!(flags & VIR_MIGRATE_OFFLINE).
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1 (context conflict)

 src/qemu/qemu_migration.c | 72 +++++++++++++++++++++++------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index b9e0c22..48a4476 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5660,8 +5660,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     /* Did the migration go as planned?  If yes, return the domain
      * object, but if no, clean up the empty qemu process.
      */
-    if (retcode == 0) {
-        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (retcode != 0 ||
+            qemuMigrationPersist(driver, vm, mig) < 0)
+            goto endjob;
+
Should this path also update mig->jobInfo if it exists? That would have
been done previously.

ACK otherwise

John
+        dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
+    } else if (retcode == 0) {
+        if (!virDomainObjIsActive(vm)) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
             qemuMigrationErrorReport(driver, vm->def->name);
@@ -5678,20 +5684,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             }
         }
[...]

On Thu, Sep 17, 2015 at 17:45:26 -0400, John Ferlan wrote:
On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Offline migration is quite special because we don't really need to do
anything but make the domain persistent. Let's do it separately from
normal migration to avoid cluttering the code with
!(flags & VIR_MIGRATE_OFFLINE).
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1 (context conflict)

 src/qemu/qemu_migration.c | 72 +++++++++++++++++++++++------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index b9e0c22..48a4476 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5660,8 +5660,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     /* Did the migration go as planned?  If yes, return the domain
      * object, but if no, clean up the empty qemu process.
      */
-    if (retcode == 0) {
-        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (retcode != 0 ||
+            qemuMigrationPersist(driver, vm, mig) < 0)
+            goto endjob;
+
Should this path also update mig->jobInfo if it exists? That would have
been done previously.
It doesn't really make a lot of sense to do anything with jobInfo
during offline migration.

Jirka

When we save status XML at the point during migration where we have
already started the domain on the destination, we can't really go back
and abort the migration. Thus the only thing we can do is to log a
warning and report success.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - no change

 src/qemu/qemu_migration.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 48a4476..3ecc1e5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3816,10 +3816,8 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
         }
 
-        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
             VIR_WARN("Failed to save status on vm %s", vm->def->name);
-            goto cleanup;
-        }
     }
 
  done:
@@ -5780,10 +5778,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     }
 
     if (virDomainObjIsActive(vm) &&
-        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
         VIR_WARN("Failed to save status on vm %s", vm->def->name);
-        goto endjob;
-    }
 
     /* Guest is successfully running, so cancel previous auto destroy */
     qemuProcessAutoDestroyRemove(driver, vm);
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
When we save status XML at the point during migration where we have
already started the domain on the destination, we can't really go back
and abort the migration. Thus the only thing we can do is to log a
warning and report success.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - no change

 src/qemu/qemu_migration.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 48a4476..3ecc1e5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3816,10 +3816,8 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
         }
 
-        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
             VIR_WARN("Failed to save status on vm %s", vm->def->name);
Should these virResetLastError() too? Might even be nice to "get" that
last error and (ahem) use it as part of the VIR_WARN output ;-) (see
the sketch at the end of this message)

Seems reasonable otherwise - ACK

John
-            goto cleanup;
-        }
     }
 
  done:
@@ -5780,10 +5778,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     }
 
     if (virDomainObjIsActive(vm) &&
-        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+        virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
         VIR_WARN("Failed to save status on vm %s", vm->def->name);
-        goto endjob;
-    }
 
     /* Guest is successfully running, so cancel previous auto destroy */
     qemuProcessAutoDestroyRemove(driver, vm);

On Thu, Sep 17, 2015 at 17:48:21 -0400, John Ferlan wrote:
On 09/11/2015 09:26 AM, Jiri Denemark wrote:
When we save status XML at the point during migration where we have
already started the domain on the destination, we can't really go back
and abort the migration. Thus the only thing we can do is to log a
warning and report success.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - no change

 src/qemu/qemu_migration.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 48a4476..3ecc1e5 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3816,10 +3816,8 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
         }
 
-        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
+        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
             VIR_WARN("Failed to save status on vm %s", vm->def->name);
Should these virResetLastError() too? Might even be nice to "get" that last error and (ahem) use it as part of the VIR_WARN output ;-)
Yeah, we could do that, although not in this patch... partially because
I think there are more places like this in our code.

Jirka

Whenever something fails during incoming migration in the Finish phase
before we started guest CPUs, we need to kill the domain in addition to
reporting the failure.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1

 src/qemu/qemu_migration.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 3ecc1e5..53444f7 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5624,6 +5624,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     unsigned short port;
+    bool keep = false;
 
     VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
               "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d",
@@ -5682,15 +5683,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             }
         }
 
-        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_STOPPED,
-                                                      VIR_DOMAIN_EVENT_STOPPED_FAILED);
+        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0)
             goto endjob;
-        }
+
         if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
             VIR_WARN("unable to provide network data for relocation");
 
@@ -5713,11 +5708,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * However, in v3 protocol, the source VM is still available
              * to restart during confirm() step, so we kill it off now.
              */
-            if (v3proto) {
-                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                                VIR_QEMU_PROCESS_STOP_MIGRATED);
-                virDomainAuditStop(vm, "failed");
-            }
+            if (!v3proto)
+                keep = true;
             goto endjob;
         }
     }
@@ -5746,14 +5738,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
          * target in paused state, in case admin can fix
          * things up
          */
-        if (v3proto) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
-            event = virDomainEventLifecycleNewFromObj(vm,
-                                                      VIR_DOMAIN_EVENT_STOPPED,
-                                                      VIR_DOMAIN_EVENT_STOPPED_FAILED);
-        }
+        if (!v3proto)
+            keep = true;
         goto endjob;
     }
     if (priv->job.completed) {
@@ -5792,7 +5778,12 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             qemuMigrationFetchJobStatus(driver, vm,
                                         QEMU_ASYNC_JOB_MIGRATION_IN,
                                         &info);
+    }
 
+ endjob:
+    if (!dom && !keep &&
+        !(flags & VIR_MIGRATE_OFFLINE) &&
+        virDomainObjIsActive(vm)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "failed");
@@ -5801,7 +5792,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
                                               VIR_DOMAIN_EVENT_STOPPED_FAILED);
     }
 
-    endjob:
     if (dom &&
         qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                 QEMU_MIGRATION_COOKIE_STATS) < 0)
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Whenever something fails during incoming migration in the Finish phase
before we started guest CPUs, we need to kill the domain in addition to
reporting the failure.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1

 src/qemu/qemu_migration.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)
Although 'keep' is a bit vague, the code motion seems reasonable - ACK

John

Finish is the final state in v2 of our migration protocol. If something
fails, we have no option to abort the migration and resume the original
domain. Non-fatal errors (such as a failure to start guest CPUs or to
make the domain persistent) have to be treated as success. Keeping the
domain running while reporting the failure was just asking for trouble.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1

 src/qemu/qemu_migration.c | 21 ++++++++------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 53444f7..35ab521 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5624,7 +5624,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     unsigned short port;
-    bool keep = false;
 
     VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
               "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d",
@@ -5700,17 +5699,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * other hand, if we report failure, then the management tools
              * might try to restart the domain on the source side, even
              * though the domain is actually running on the destination.
-             * Return a NULL dom pointer, and hope that this is a rare
-             * situation and management tools are smart.
-             */
-
-            /*
+             * Pretend success and hope that this is a rare situation and
+             * management tools are smart.
+             *
              * However, in v3 protocol, the source VM is still available
              * to restart during confirm() step, so we kill it off now.
              */
-            if (!v3proto)
-                keep = true;
-            goto endjob;
+            if (v3proto)
+                goto endjob;
         }
     }
 
@@ -5738,9 +5734,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
          * target in paused state, in case admin can fix
          * things up
          */
-        if (!v3proto)
-            keep = true;
-        goto endjob;
+        if (v3proto)
+            goto endjob;
     }
     if (priv->job.completed) {
         qemuDomainJobInfoUpdateTime(priv->job.completed);
@@ -5781,7 +5776,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     }
 
  endjob:
-    if (!dom && !keep &&
+    if (!dom &&
         !(flags & VIR_MIGRATE_OFFLINE) &&
         virDomainObjIsActive(vm)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Finish is the final state in v2 of our migration protocol. If something
fails, we have no option to abort the migration and resume the original
domain. Non-fatal errors (such as a failure to start guest CPUs or to
make the domain persistent) have to be treated as success. Keeping the
domain running while reporting the failure was just asking for trouble.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - rebased on top of changed patch 1

 src/qemu/qemu_migration.c | 21 ++++++++------------
 1 file changed, 8 insertions(+), 13 deletions(-)
Well, we didn't 'keep' that long ;-) (need to look ahead more often ;-))

Seems reasonable - ACK

John
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 53444f7..35ab521 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5624,7 +5624,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     unsigned short port;
-    bool keep = false;
 
     VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
               "cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d",
@@ -5700,17 +5699,14 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * other hand, if we report failure, then the management tools
              * might try to restart the domain on the source side, even
              * though the domain is actually running on the destination.
-             * Return a NULL dom pointer, and hope that this is a rare
-             * situation and management tools are smart.
-             */
-
-            /*
+             * Pretend success and hope that this is a rare situation and
+             * management tools are smart.
+             *
Close your eyes and kiss your domain goodbye... Too bad there wasn't a
way to inhibit the mgmt tools from doing that restart if they weren't
smart, yet still fake the success.
              * However, in v3 protocol, the source VM is still available
              * to restart during confirm() step, so we kill it off now.
              */
-            if (!v3proto)
-                keep = true;
-            goto endjob;
+            if (v3proto)
+                goto endjob;
         }
     }
 
@@ -5738,9 +5734,8 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
          * target in paused state, in case admin can fix
          * things up
          */
-        if (!v3proto)
-            keep = true;
-        goto endjob;
+        if (v3proto)
+            goto endjob;
     }
     if (priv->job.completed) {
         qemuDomainJobInfoUpdateTime(priv->job.completed);
@@ -5781,7 +5776,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     }
 
  endjob:
-    if (!dom && !keep &&
+    if (!dom &&
         !(flags & VIR_MIGRATE_OFFLINE) &&
         virDomainObjIsActive(vm)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,

Every single call to qemuDomainEventQueue() uses the following pattern:

    if (event)
        qemuDomainEventQueue(driver, event);

Let's move the check for a valid event to qemuDomainEventQueue and
simplify all callers.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - no change

 src/qemu/qemu_blockjob.c  |  6 ++--
 src/qemu/qemu_cgroup.c    |  3 +-
 src/qemu/qemu_domain.c    |  6 ++--
 src/qemu/qemu_driver.c    | 87 ++++++++++++++++------------------------------
 src/qemu/qemu_hotplug.c   | 26 ++++++--------
 src/qemu/qemu_migration.c | 24 +++++--------
 src/qemu/qemu_process.c   | 72 +++++++++++++--------------------------
 7 files changed, 78 insertions(+), 146 deletions(-)

diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index 8849850..1d5b7ce 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -194,10 +194,8 @@ qemuBlockJobEventProcess(virQEMUDriverPtr driver,
                      "after block job", vm->def->name);
     }
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
-    if (event2)
-        qemuDomainEventQueue(driver, event2);
+    qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event2);
 
     virObjectUnref(cfg);
 }
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index 0da6c02..570dab5 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -706,8 +706,7 @@ qemuSetupCpuCgroup(virQEMUDriverPtr driver,
             event = virDomainEventTunableNewFromObj(vm, eventParams, eventNparams);
         }
 
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
     }
 
     return 0;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c8b0ccd..0ef09b2 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -126,7 +126,8 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
 void qemuDomainEventQueue(virQEMUDriverPtr driver,
                           virObjectEventPtr event)
 {
-    virObjectEventStateQueue(driver->domainEventState, event);
+    if (event)
+        virObjectEventStateQueue(driver->domainEventState, event);
 }
 
 
@@ -2732,8 +2733,7 @@ qemuDomainCheckRemoveOptionalDisk(virQEMUDriverPtr driver,
         virDomainDiskDefFree(disk);
     }
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     return 0;
 }
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 91eb661..f26564a 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -974,8 +974,7 @@ static void qemuNotifyLoadDomain(virDomainObjPtr vm, int newVM, void *opaque)
             virDomainEventLifecycleNewFromObj(vm,
                                               VIR_DOMAIN_EVENT_DEFINED,
                                               VIR_DOMAIN_EVENT_DEFINED_ADDED);
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
     }
 }
@@ -1789,8 +1788,7 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virDomainObjEndAPI(&vm);
     if (event) {
         qemuDomainEventQueue(driver, event);
-        if (event2)
-            qemuDomainEventQueue(driver, event2);
+        qemuDomainEventQueue(driver, event2);
     }
     virObjectUnref(caps);
     virObjectUnref(qemuCaps);
@@ -1871,8 +1869,7 @@ static int qemuDomainSuspend(virDomainPtr dom)
 
  cleanup:
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return ret;
 }
@@ -1931,8 +1928,7 @@ static int qemuDomainResume(virDomainPtr dom)
 
  cleanup:
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return ret;
 }
@@ -2252,8 +2248,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
 
  cleanup:
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     return ret;
 }
@@ -3276,8 +3271,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
 
  cleanup:
     VIR_FREE(xml);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(caps);
     return ret;
 }
@@ -3778,8 +3772,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
 
  cleanup:
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     return ret;
 }
@@ -4017,8 +4010,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
                                      VIR_DOMAIN_EVENT_CRASHED,
                                      VIR_DOMAIN_EVENT_CRASHED_PANICKED);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
         VIR_WARN("Unable to release lease on %s", vm->def->name);
@@ -4051,8 +4043,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
                                          VIR_DOMAIN_EVENT_STOPPED,
                                          VIR_DOMAIN_EVENT_STOPPED_CRASHED);
 
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
 
         virDomainAuditStop(vm, "destroyed");
 
@@ -4495,9 +4486,9 @@ processSerialChangedEvent(virQEMUDriverPtr driver,
             }
         }
 
-        if ((event = virDomainEventAgentLifecycleNewFromObj(vm, newstate,
-            VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_CHANNEL)))
-            qemuDomainEventQueue(driver, event);
+        event = virDomainEventAgentLifecycleNewFromObj(vm, newstate,
+            VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_CHANNEL);
+        qemuDomainEventQueue(driver, event);
     }
 
     dev.data.chr->state = newstate;
@@ -5196,8 +5187,7 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
     if (cgroup_vcpu)
         virCgroupFree(&cgroup_vcpu);
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     VIR_FREE(str);
     virBitmapFree(pcpumap);
     virObjectUnref(cfg);
@@ -5398,8 +5388,7 @@ qemuDomainPinEmulator(virDomainPtr dom,
  cleanup:
     if (cgroup_emulator)
         virCgroupFree(&cgroup_emulator);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     VIR_FREE(str);
     virBitmapFree(pcpumap);
     virDomainObjEndAPI(&vm);
@@ -5899,8 +5888,7 @@ qemuDomainPinIOThread(virDomainPtr dom,
  cleanup:
     if (cgroup_iothread)
         virCgroupFree(&cgroup_iothread);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     VIR_FREE(str);
     virBitmapFree(pcpumap);
     virDomainObjEndAPI(&vm);
@@ -6708,8 +6696,7 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
                                      VIR_DOMAIN_EVENT_STARTED,
                                      VIR_DOMAIN_EVENT_STARTED_RESTORED);
     virDomainAuditStart(vm, "restored", true);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     /* If it was running before, resume it now unless caller requested pause.
      */
@@ -6732,8 +6719,7 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
             event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_SUSPENDED,
                                              detail);
-            if (event)
-                qemuDomainEventQueue(driver, event);
+            qemuDomainEventQueue(driver, event);
         }
     }
 
     ret = 0;
@@ -7401,8 +7387,7 @@ qemuDomainObjStart(virConnectPtr conn,
                 event = virDomainEventLifecycleNewFromObj(vm,
                                          VIR_DOMAIN_EVENT_SUSPENDED,
                                          VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
-                if (event)
-                    qemuDomainEventQueue(driver, event);
+                qemuDomainEventQueue(driver, event);
             }
         }
     }
@@ -7551,8 +7536,7 @@ static virDomainPtr qemuDomainDefineXMLFlags(virConnectPtr conn, const char *xml
     virDomainDefFree(oldDef);
     virDomainDefFree(def);
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(qemuCaps);
     virObjectUnref(caps);
     virObjectUnref(cfg);
@@ -7669,8 +7653,7 @@ qemuDomainUndefineFlags(virDomainPtr dom,
  cleanup:
     VIR_FREE(name);
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return ret;
 }
@@ -7818,8 +7801,7 @@ qemuDomainAttachDeviceLive(virDomainObjPtr vm,
          * is in monitor */
         virObjectEventPtr event;
         event = virDomainEventDeviceAddedNewFromObj(vm, alias);
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
     }
 
     if (ret == 0)
@@ -10503,8 +10485,7 @@ qemuDomainSetSchedulerParametersFlags(virDomainPtr dom,
         if (eventNparams) {
             event = virDomainEventTunableNewFromDom(dom, eventParams, eventNparams);
             eventNparams = 0;
-            if (event)
-                qemuDomainEventQueue(driver, event);
+            qemuDomainEventQueue(driver, event);
         }
 
     if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
@@ -13739,8 +13720,7 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
         }
     }
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     return ret;
 }
@@ -14561,8 +14541,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
         virDomainAuditStop(vm, "from-snapshot");
         resume = false;
         thaw = 0;
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
     } else if (memory && pmsuspended) {
         /* qemu 1.3 is unable to save a domain in pm-suspended (S3)
          * state; so we must emit an event stating that it was
@@ -14571,8 +14550,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
                              VIR_DOMAIN_PAUSED_FROM_SNAPSHOT);
         event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_SUSPENDED,
                                          VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT);
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
     }
 
     ret = 0;
@@ -14585,8 +14563,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
             event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_SUSPENDED,
                                              VIR_DOMAIN_EVENT_SUSPENDED_API_ERROR);
-            if (event)
-                qemuDomainEventQueue(driver, event);
+            qemuDomainEventQueue(driver, event);
             if (virGetLastError() == NULL) {
                 virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                                _("resuming after snapshot failed"));
@@ -15425,8 +15402,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                 event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_STOPPED,
                                                  detail);
-                if (event)
-                    qemuDomainEventQueue(driver, event);
+                qemuDomainEventQueue(driver, event);
                 goto load;
             }
@@ -15564,8 +15540,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
 
         start_flags |= paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
 
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
         rc = qemuProcessStart(snapshot->domain->conn, driver, vm,
                               QEMU_ASYNC_JOB_NONE, NULL, -1, NULL, NULL,
                               VIR_NETDEV_VPORT_PROFILE_OP_CREATE,
@@ -15625,8 +15600,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     }
     if (event) {
         qemuDomainEventQueue(driver, event);
-        if (event2)
-            qemuDomainEventQueue(driver, event2);
+        qemuDomainEventQueue(driver, event2);
     }
     virDomainObjEndAPI(&vm);
     virObjectUnref(caps);
@@ -17708,8 +17682,7 @@ qemuDomainSetBlockIoTune(virDomainPtr dom,
         if (eventNparams) {
             event = virDomainEventTunableNewFromDom(dom, eventParams, eventNparams);
             eventNparams = 0;
-            if (event)
-                qemuDomainEventQueue(driver, event);
+            qemuDomainEventQueue(driver, event);
         }
     }
diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 63fafa6..0639481 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -1828,8 +1828,7 @@ qemuDomainAttachMemory(virQEMUDriverPtr driver,
     }
 
     event = virDomainEventDeviceAddedNewFromObj(vm, objalias);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     /* fix the balloon size if it was set to maximum */
     if (fix_balloon)
@@ -2859,8 +2858,7 @@ qemuDomainRemoveDiskDevice(virQEMUDriverPtr driver,
     virDomainAuditDisk(vm, disk->src, NULL, "detach", true);
 
     event = virDomainEventDeviceRemovedNewFromObj(vm, disk->info.alias);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     for (i = 0; i < vm->def->ndisks; i++) {
         if (vm->def->disks[i] == disk) {
@@ -2902,8 +2900,7 @@ qemuDomainRemoveControllerDevice(virQEMUDriverPtr driver,
               controller->info.alias, vm, vm->def->name);
 
     event = virDomainEventDeviceRemovedNewFromObj(vm, controller->info.alias);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     for (i = 0; i < vm->def->ncontrollers; i++) {
         if (vm->def->controllers[i] == controller) {
@@ -2934,8 +2931,8 @@ qemuDomainRemoveMemoryDevice(virQEMUDriverPtr driver,
     VIR_DEBUG("Removing memory device %s from domain %p %s",
               mem->info.alias, vm, vm->def->name);
 
-    if ((event = virDomainEventDeviceRemovedNewFromObj(vm, mem->info.alias)))
-        qemuDomainEventQueue(driver, event);
+    event = virDomainEventDeviceRemovedNewFromObj(vm, mem->info.alias);
+    qemuDomainEventQueue(driver, event);
 
     if (virAsprintf(&backendAlias, "mem%s", mem->info.alias) < 0)
         return -1;
@@ -3017,8 +3014,7 @@ qemuDomainRemoveHostDevice(virQEMUDriverPtr driver,
     }
 
     event = virDomainEventDeviceRemovedNewFromObj(vm, hostdev->info->alias);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     if (hostdev->parent.type == VIR_DOMAIN_DEVICE_NET) {
         net = hostdev->parent.data.net;
@@ -3132,8 +3128,7 @@ qemuDomainRemoveNetDevice(virQEMUDriverPtr driver,
     virDomainAuditNet(vm, net, NULL, "detach", true);
 
     event = virDomainEventDeviceRemovedNewFromObj(vm, net->info.alias);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     for (i = 0; i < vm->def->nnets; i++) {
         if (vm->def->nets[i] == net) {
@@ -3210,8 +3205,7 @@ qemuDomainRemoveChrDevice(virQEMUDriverPtr driver,
         goto cleanup;
 
     event = virDomainEventDeviceRemovedNewFromObj(vm, chr->info.alias);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     qemuDomainChrRemove(vm->def, chr);
     virDomainChrDefFree(chr);
@@ -3259,8 +3253,8 @@ qemuDomainRemoveRNGDevice(virQEMUDriverPtr driver,
     if (rc < 0)
         goto cleanup;
 
-    if ((event = virDomainEventDeviceRemovedNewFromObj(vm, rng->info.alias)))
-        qemuDomainEventQueue(driver, event);
+    event = virDomainEventDeviceRemovedNewFromObj(vm, rng->info.alias);
+    qemuDomainEventQueue(driver, event);
 
     if ((idx = virDomainRNGFind(vm->def, rng)) >= 0)
         virDomainRNGRemove(vm->def, idx);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 35ab521..22c994e 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2303,8 +2303,7 @@ qemuMigrationSetOffline(virQEMUDriverPtr driver,
         event = virDomainEventLifecycleNewFromObj(vm,
                                          VIR_DOMAIN_EVENT_SUSPENDED,
                                          VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
     }
 
     return ret;
@@ -3475,8 +3474,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
             qemuDomainRemoveInactive(driver, vm);
     }
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     qemuMigrationCookieFree(mig);
     virObjectUnref(caps);
     virNWFilterUnlockFilterUpdates();
@@ -3825,8 +3823,7 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
     rv = 0;
 
  cleanup:
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return rv;
 }
@@ -5362,8 +5359,7 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
 
  cleanup:
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return ret;
 }
@@ -5431,8 +5427,7 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver,
 
  cleanup:
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     return ret;
 }
@@ -5592,8 +5587,7 @@ qemuMigrationPersist(virQEMUDriverPtr driver,
                                               newVM ?
                                               VIR_DOMAIN_EVENT_DEFINED_ADDED :
                                               VIR_DOMAIN_EVENT_DEFINED_UPDATED);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     ret = 0;
@@ -5751,8 +5745,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
         virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                              VIR_DOMAIN_PAUSED_USER);
-        if (event)
-            qemuDomainEventQueue(driver, event);
+        qemuDomainEventQueue(driver, event);
         event = virDomainEventLifecycleNewFromObj(vm,
                                                   VIR_DOMAIN_EVENT_SUSPENDED,
                                                   VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
@@ -5802,8 +5795,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuMonitorSetDomainLog(priv->mon, -1);
     VIR_FREE(priv->origname);
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     qemuMigrationCookieFree(mig);
     if (orig_err) {
         virSetError(orig_err);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index d9a0942..b16f234 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -328,8 +328,7 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
  cleanup:
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 }
@@ -353,8 +352,7 @@ qemuProcessHandleMonitorError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
     ((qemuDomainObjPrivatePtr) vm->privateData)->monError = true;
     event = virDomainEventControlErrorNewFromObj(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     virObjectUnlock(vm);
 }
@@ -517,8 +515,7 @@ qemuProcessHandleReset(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
     virObjectUnlock(vm);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     virObjectUnref(cfg);
     return 0;
@@ -594,8 +591,7 @@ qemuProcessFakeReboot(void *opaque)
     if (ret == -1)
         ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
     virDomainObjEndAPI(&vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
 }
@@ -644,8 +640,7 @@ qemuProcessHandleEvent(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                                           seconds, micros, details);
 
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     return 0;
 }
@@ -698,8 +693,7 @@ qemuProcessHandleShutdown(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
  unlock:
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
 
     return 0;
@@ -748,8 +742,7 @@ qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
  unlock:
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
 
     return 0;
@@ -802,8 +795,7 @@ qemuProcessHandleResume(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
  unlock:
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return 0;
 }
@@ -847,8 +839,7 @@ qemuProcessHandleRTCChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
     virObjectUnlock(vm);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return 0;
 }
@@ -909,10 +900,8 @@ qemuProcessHandleWatchdog(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
     if (vm)
         virObjectUnlock(vm);
 
-    if (watchdogEvent)
-        qemuDomainEventQueue(driver, watchdogEvent);
-    if (lifecycleEvent)
-        qemuDomainEventQueue(driver, lifecycleEvent);
+    qemuDomainEventQueue(driver, watchdogEvent);
+    qemuDomainEventQueue(driver, lifecycleEvent);
 
     virObjectUnref(cfg);
     return 0;
@@ -973,12 +962,9 @@ qemuProcessHandleIOError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
     }
     virObjectUnlock(vm);
 
-    if (ioErrorEvent)
-        qemuDomainEventQueue(driver, ioErrorEvent);
-    if (ioErrorEvent2)
-        qemuDomainEventQueue(driver, ioErrorEvent2);
-    if (lifecycleEvent)
-        qemuDomainEventQueue(driver, lifecycleEvent);
+    qemuDomainEventQueue(driver, ioErrorEvent);
+    qemuDomainEventQueue(driver, ioErrorEvent2);
+    qemuDomainEventQueue(driver, lifecycleEvent);
     virObjectUnref(cfg);
     return 0;
 }
@@ -1101,8 +1087,7 @@ qemuProcessHandleGraphics(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
     event = virDomainEventGraphicsNewFromObj(vm, phase, localAddr, remoteAddr, authScheme, subject);
     virObjectUnlock(vm);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
     return 0;
@@ -1163,8 +1148,7 @@ qemuProcessHandleTrayChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
     }
 
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return 0;
 }
@@ -1202,10 +1186,8 @@ qemuProcessHandlePMWakeup(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
     }
 
     virObjectUnlock(vm);
-    if (event)
-        qemuDomainEventQueue(driver, event);
-    if (lifecycleEvent)
-        qemuDomainEventQueue(driver, lifecycleEvent);
+    qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, lifecycleEvent);
     virObjectUnref(cfg);
     return 0;
 }
@@ -1246,10 +1228,8 @@ qemuProcessHandlePMSuspend(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
     virObjectUnlock(vm);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
-    if (lifecycleEvent)
-        qemuDomainEventQueue(driver, lifecycleEvent);
+    qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, lifecycleEvent);
     virObjectUnref(cfg);
     return 0;
 }
@@ -1276,8 +1256,7 @@ qemuProcessHandleBalloonChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
     virObjectUnlock(vm);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
     virObjectUnref(cfg);
     return 0;
 }
@@ -1318,10 +1297,8 @@ qemuProcessHandlePMSuspendDisk(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
 
     virObjectUnlock(vm);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
-    if (lifecycleEvent)
-        qemuDomainEventQueue(driver, lifecycleEvent);
+    qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, lifecycleEvent);
     virObjectUnref(cfg);
 
     return 0;
@@ -5777,8 +5754,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
     if (!dom->persistent)
         qemuDomainRemoveInactive(driver, dom);
 
-    if (event)
-        qemuDomainEventQueue(driver, event);
+    qemuDomainEventQueue(driver, event);
 
  cleanup:
     virDomainObjEndAPI(&dom);
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Every single call to qemuDomainEventQueue() uses the following pattern:
    if (event)
        qemuDomainEventQueue(driver, event);
Let's move the check for valid event to qemuDomainEventQueue and simplify all callers.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - no change

 src/qemu/qemu_blockjob.c  |  6 ++--
 src/qemu/qemu_cgroup.c    |  3 +-
 src/qemu/qemu_domain.c    |  6 ++--
 src/qemu/qemu_driver.c    | 87 ++++++++++++++++------------------------------
 src/qemu/qemu_hotplug.c   | 26 ++++++--------
 src/qemu/qemu_migration.c | 24 +++++--------
 src/qemu/qemu_process.c   | 72 +++++++++++++--------------------------
 7 files changed, 78 insertions(+), 146 deletions(-)
Yay!

A couple still are behind "if (event) {" in qemu_driver
(qemuDomainCreateXML, qemuDomainRevertToSnapshot), as well as an
if (event_old) and if (event_new) in qemuDomainRename.

ACK - I think those can be cleaned up

John

On Thu, Sep 17, 2015 at 18:24:29 -0400, John Ferlan wrote:
On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Every single call to qemuDomainEventQueue() uses the following pattern:
    if (event)
        qemuDomainEventQueue(driver, event);
Let's move the check for valid event to qemuDomainEventQueue and simplify all callers.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
Notes:
    Version 2:
    - no change

 src/qemu/qemu_blockjob.c  |  6 ++--
 src/qemu/qemu_cgroup.c    |  3 +-
 src/qemu/qemu_domain.c    |  6 ++--
 src/qemu/qemu_driver.c    | 87 ++++++++++++++++------------------------------
 src/qemu/qemu_hotplug.c   | 26 ++++++--------
 src/qemu/qemu_migration.c | 24 +++++--------
 src/qemu/qemu_process.c   | 72 +++++++++++++--------------------------
 7 files changed, 78 insertions(+), 146 deletions(-)
Yay! A couple still are behind "if (event) {" in qemu_driver (qemuDomainCreateXML, qemuDomainRevertToSnapshot) as well as an if
There are two events involved in both functions ("event" and "event2")
and we can send event2 only if event is non-NULL, which is the reason
for

    if (event) {
        qemuDomainEventQueue(driver, event);
        qemuDomainEventQueue(driver, event2);
    }

It would be possible to rewrite it as

    qemuDomainEventQueue(driver, event);
    if (event)
        qemuDomainEventQueue(driver, event2);

but I think the original version is slightly better.
(event_old) and if (event_new) in qemuDomainRename
Yeah, both came in after I originally created this patch. I will clean
them up before pushing.

Jirka

On 09/18/2015 07:46 AM, Jiri Denemark wrote:
On Thu, Sep 17, 2015 at 18:24:29 -0400, John Ferlan wrote:
On 09/11/2015 09:26 AM, Jiri Denemark wrote:
Every single call to qemuDomainEventQueue() uses the following pattern:
    if (event)
        qemuDomainEventQueue(driver, event);
Let's move the check for valid event to qemuDomainEventQueue and simplify all callers.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - no change

 src/qemu/qemu_blockjob.c  |  6 ++--
 src/qemu/qemu_cgroup.c    |  3 +-
 src/qemu/qemu_domain.c    |  6 ++--
 src/qemu/qemu_driver.c    | 87 ++++++++++++++++-------------------------------
 src/qemu/qemu_hotplug.c   | 26 ++++++--------
 src/qemu/qemu_migration.c | 24 +++++--------
 src/qemu/qemu_process.c   | 72 +++++++++++++--------------------------
 7 files changed, 78 insertions(+), 146 deletions(-)
Yay! A couple still are behind "if (event) {" in qemu_driver (qemuDomainCreateXML, qemuDomainRevertToSnapshot) as well as an if
There are two events involved in both functions ("event" and "event2")
and we can send event2 only if event is not NULL, which is the reason for

    if (event) {
        qemuDomainEventQueue(driver, event);
        qemuDomainEventQueue(driver, event2);
    }
It would be possible to rewrite it as
    qemuDomainEventQueue(driver, event);
    if (event)
        qemuDomainEventQueue(driver, event2);
but I think the original version is slightly better.
Right - I did look and went back and forth in my own head a couple of
times, but kept coming back to the conclusion that event2 can only be set
if event is set. Since the new code will ignore NULL, I just figured it
was safe - although I'm perfectly fine with leaving it as you've coded it.
IOW: I saw no path where event2 was set but event wasn't.

John

For quite a long time we haven't needed to postpone queueing events until
the end of the function, since we no longer hold the big driver lock.
Let's make the code of qemuMigrationFinish simpler by queueing events at
the time we generate them.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - no change

 src/qemu/qemu_migration.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 22c994e..c761657 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -5611,7 +5611,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
                     bool v3proto)
 {
     virDomainPtr dom = NULL;
-    virObjectEventPtr event = NULL;
     qemuMigrationCookiePtr mig = NULL;
     virErrorPtr orig_err = NULL;
     int cookie_flags = 0;
@@ -5739,16 +5738,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver,

     dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);

-    event = virDomainEventLifecycleNewFromObj(vm,
-                                              VIR_DOMAIN_EVENT_RESUMED,
-                                              VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+    qemuDomainEventQueue(driver,
+                         virDomainEventLifecycleNewFromObj(vm,
+                                    VIR_DOMAIN_EVENT_RESUMED,
+                                    VIR_DOMAIN_EVENT_RESUMED_MIGRATED));
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
         virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
-        qemuDomainEventQueue(driver, event);
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_SUSPENDED,
-                                                  VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+        qemuDomainEventQueue(driver,
+                             virDomainEventLifecycleNewFromObj(vm,
+                                    VIR_DOMAIN_EVENT_SUSPENDED,
+                                    VIR_DOMAIN_EVENT_SUSPENDED_PAUSED));
     }

     if (virDomainObjIsActive(vm) &&
@@ -5775,9 +5775,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "failed");
-        event = virDomainEventLifecycleNewFromObj(vm,
-                                                  VIR_DOMAIN_EVENT_STOPPED,
-                                                  VIR_DOMAIN_EVENT_STOPPED_FAILED);
+        qemuDomainEventQueue(driver,
+                             virDomainEventLifecycleNewFromObj(vm,
+                                    VIR_DOMAIN_EVENT_STOPPED,
+                                    VIR_DOMAIN_EVENT_STOPPED_FAILED));
     }

     if (dom &&
@@ -5795,7 +5796,6 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuMonitorSetDomainLog(priv->mon, -1);
     VIR_FREE(priv->origname);
     virDomainObjEndAPI(&vm);
-    qemuDomainEventQueue(driver, event);
     qemuMigrationCookieFree(mig);
     if (orig_err) {
         virSetError(orig_err);
-- 
2.5.1

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
For quite a long time we haven't needed to postpone queueing events until the end of the function, since we no longer hold the big driver lock. Let's make the code of qemuMigrationFinish simpler by queueing events at the time we generate them.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - no change

 src/qemu/qemu_migration.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
ACK (although not my favorite coding style)

John
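(A detail worth spelling out: the old code reused one local event variable, so most events sat in it until the cleanup path ran, and the paused branch had to flush the RESUMED event early before overwriting the variable. After the patch every path creates and queues in a single statement, for example:

    qemuDomainEventQueue(driver,
                         virDomainEventLifecycleNewFromObj(vm,
                                    VIR_DOMAIN_EVENT_RESUMED,
                                    VIR_DOMAIN_EVENT_RESUMED_MIGRATED));

This mirrors the hunk above; nothing here is new code.)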

When persistently migrating a domain to a destination host where the same
domain already exists (i.e., it is persistent and shutdown at the
destination), we would happily through away the original persistent
definition without properly freeing it. And when updating the definition
fails for some reason we don't properly revert to the original state
leaving the domain broken.

In addition to fixing these issues, the patch also makes sure the domain
definition parsed from a migration cookie is either used or freed.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - new patch

 src/qemu/qemu_migration.c | 56 +++++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 19 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index c761657..1d556eb 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -525,6 +525,19 @@ qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig,
 }

+static virDomainDefPtr
+qemuMigrationCookieGetPersistent(qemuMigrationCookiePtr mig)
+{
+    virDomainDefPtr def = mig->persistent;
+
+    mig->persistent = NULL;
+    mig->flags &= ~QEMU_MIGRATION_COOKIE_PERSISTENT;
+    mig->flagsMandatory &= ~QEMU_MIGRATION_COOKIE_PERSISTENT;
+
+    return def;
+}
+
+
 static int
 qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig,
                               virQEMUDriverPtr driver,
@@ -5554,47 +5567,51 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
 static int
 qemuMigrationPersist(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
-                     qemuMigrationCookiePtr mig)
+                     qemuMigrationCookiePtr mig,
+                     bool ignoreSaveError)
 {
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     virCapsPtr caps = NULL;
     virDomainDefPtr vmdef;
+    virDomainDefPtr oldDef = NULL;
+    unsigned int oldPersist = vm->persistent;
     virObjectEventPtr event;
-    bool newVM;
     int ret = -1;

     if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
         goto cleanup;

-    newVM = !vm->persistent;
     vm->persistent = 1;
+    oldDef = vm->newDef;
+    vm->newDef = qemuMigrationCookieGetPersistent(mig);

-    if (mig->persistent)
-        vm->newDef = mig->persistent;
+    if (!(vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm)))
+        goto error;

-    vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
-    if (!vmdef) {
-        if (newVM)
-            vm->persistent = 0;
-        goto cleanup;
-    }
-
-    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0)
-        goto cleanup;
+    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0 && !ignoreSaveError)
+        goto error;

     event = virDomainEventLifecycleNewFromObj(vm,
                                               VIR_DOMAIN_EVENT_DEFINED,
-                                              newVM ?
-                                              VIR_DOMAIN_EVENT_DEFINED_ADDED :
-                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED);
+                                              oldPersist ?
+                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED :
+                                              VIR_DOMAIN_EVENT_DEFINED_ADDED);
     qemuDomainEventQueue(driver, event);

     ret = 0;

 cleanup:
+    virDomainDefFree(oldDef);
     virObjectUnref(caps);
     virObjectUnref(cfg);
     return ret;
+
+ error:
+    virDomainDefFree(vm->newDef);
+    vm->persistent = oldPersist;
+    vm->newDef = oldDef;
+    oldDef = NULL;
+    goto cleanup;
 }

@@ -5653,7 +5670,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
      */
     if (flags & VIR_MIGRATE_OFFLINE) {
         if (retcode != 0 ||
-            qemuMigrationPersist(driver, vm, mig) < 0)
+            qemuMigrationPersist(driver, vm, mig, false) < 0)
             goto endjob;

         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
@@ -5685,7 +5702,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
         goto endjob;

     if (flags & VIR_MIGRATE_PERSIST_DEST) {
-        if (qemuMigrationPersist(driver, vm, mig) < 0) {
+        if (qemuMigrationPersist(driver, vm, mig, !v3proto) < 0) {
             /* Hmpf. Migration was successful, but making it persistent
              * was not. If we report successful, then when this domain
              * shuts down, management tools are in for a surprise. On the
@@ -5796,6 +5813,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuMonitorSetDomainLog(priv->mon, -1);
     VIR_FREE(priv->origname);
     virDomainObjEndAPI(&vm);
+    virDomainDefFree(qemuMigrationCookieGetPersistent(mig));
     qemuMigrationCookieFree(mig);
     if (orig_err) {
         virSetError(orig_err);
-- 
2.5.1
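(Worth noting about qemuMigrationCookieGetPersistent above: it is a steal-style getter, not a plain accessor. The same hunk, restated with explanatory comments - the comments are ours, not part of the patch:

    static virDomainDefPtr
    qemuMigrationCookieGetPersistent(qemuMigrationCookiePtr mig)
    {
        virDomainDefPtr def = mig->persistent;

        /* hand ownership of the definition to the caller ... */
        mig->persistent = NULL;
        /* ... and clear both flag sets so later cookie code (baking or
         * freeing) no longer believes it carries a persistent def */
        mig->flags &= ~QEMU_MIGRATION_COOKIE_PERSISTENT;
        mig->flagsMandatory &= ~QEMU_MIGRATION_COOKIE_PERSISTENT;

        return def;
    }

Because ownership moves on every call, the definition is either consumed by qemuMigrationPersist or freed by the new virDomainDefFree() call at the end of qemuMigrationFinish - never both.)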

On 09/11/2015 09:26 AM, Jiri Denemark wrote:
When persistently migrating a domain to a destination host where the same domain already exists (i.e., it is persistent and shutdown at the destination), we would happily through away the original persistent
s/through/throw
definition without properly freeing it. And when updating the definition fails for some reason we don't properly revert to the original state leaving the domain broken.
In addition to fixing these issues, the patch also makes sure the domain definition parsed from a migration cookie is either used or freed.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - new patch

 src/qemu/qemu_migration.c | 56 +++++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 19 deletions(-)
Ran using my Coverity checker...

One issue - qemuMigrationFinish can get to 'cleanup:' calling
qemuMigrationCookieGetPersistent when 'mig == NULL', from either the goto
in the "if (!qemuMigrationJobIsActive(vm...)" path or the
"if (!(mig = qemuMigrationEatCookie(driver, ..." path.
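(Concretely, the two early exits in question look like this - fragments as quoted earlier in the series, with the EatCookie argument list abbreviated the same way:

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
        qemuMigrationErrorReport(driver, vm->def->name);
        goto cleanup;                /* mig is still NULL here */
    }
    ...
    if (!(mig = qemuMigrationEatCookie(driver, ...)))
        goto cleanup;                /* mig stays NULL on failure */

Either path reaches the cleanup section with mig == NULL, where the new virDomainDefFree(qemuMigrationCookieGetPersistent(mig)) call would dereference it.)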
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index c761657..1d556eb 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -525,6 +525,19 @@ qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig,
 }

+static virDomainDefPtr
+qemuMigrationCookieGetPersistent(qemuMigrationCookiePtr mig)
+{
+    virDomainDefPtr def = mig->persistent;
+
+    mig->persistent = NULL;
+    mig->flags &= ~QEMU_MIGRATION_COOKIE_PERSISTENT;
+    mig->flagsMandatory &= ~QEMU_MIGRATION_COOKIE_PERSISTENT;
+
+    return def;
+}
+
+
 static int
 qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig,
                               virQEMUDriverPtr driver,
@@ -5554,47 +5567,51 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
 static int
 qemuMigrationPersist(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
-                     qemuMigrationCookiePtr mig)
+                     qemuMigrationCookiePtr mig,
+                     bool ignoreSaveError)
 {
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     virCapsPtr caps = NULL;
     virDomainDefPtr vmdef;
+    virDomainDefPtr oldDef = NULL;
+    unsigned int oldPersist = vm->persistent;
     virObjectEventPtr event;
-    bool newVM;
     int ret = -1;

     if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
         goto cleanup;

-    newVM = !vm->persistent;
     vm->persistent = 1;
+    oldDef = vm->newDef;
+    vm->newDef = qemuMigrationCookieGetPersistent(mig);

-    if (mig->persistent)
-        vm->newDef = mig->persistent;
+    if (!(vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm)))
+        goto error;

-    vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
-    if (!vmdef) {
-        if (newVM)
-            vm->persistent = 0;
-        goto cleanup;
-    }
-
-    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0)
-        goto cleanup;
+    if (virDomainSaveConfig(cfg->configDir, vmdef) < 0 && !ignoreSaveError)
+        goto error;

     event = virDomainEventLifecycleNewFromObj(vm,
                                               VIR_DOMAIN_EVENT_DEFINED,
-                                              newVM ?
-                                              VIR_DOMAIN_EVENT_DEFINED_ADDED :
-                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED);
+                                              oldPersist ?
+                                              VIR_DOMAIN_EVENT_DEFINED_UPDATED :
+                                              VIR_DOMAIN_EVENT_DEFINED_ADDED);
     qemuDomainEventQueue(driver, event);

     ret = 0;

 cleanup:
+    virDomainDefFree(oldDef);
     virObjectUnref(caps);
     virObjectUnref(cfg);
     return ret;
+
+ error:
+    virDomainDefFree(vm->newDef);
+    vm->persistent = oldPersist;
This set of changes resolves what I pointed out in patch 1 regarding setting vm->persistent
+    vm->newDef = oldDef;
+    oldDef = NULL;
+    goto cleanup;
 }
@@ -5653,7 +5670,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
      */
     if (flags & VIR_MIGRATE_OFFLINE) {
         if (retcode != 0 ||
-            qemuMigrationPersist(driver, vm, mig) < 0)
+            qemuMigrationPersist(driver, vm, mig, false) < 0)
             goto endjob;

         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
@@ -5685,7 +5702,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
         goto endjob;

     if (flags & VIR_MIGRATE_PERSIST_DEST) {
-        if (qemuMigrationPersist(driver, vm, mig) < 0) {
+        if (qemuMigrationPersist(driver, vm, mig, !v3proto) < 0) {
             /* Hmpf. Migration was successful, but making it persistent
              * was not. If we report successful, then when this domain
              * shuts down, management tools are in for a surprise. On the
@@ -5796,6 +5813,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuMonitorSetDomainLog(priv->mon, -1);
     VIR_FREE(priv->origname);
     virDomainObjEndAPI(&vm);
+    virDomainDefFree(qemuMigrationCookieGetPersistent(mig));
If this has an "if (mig)" guard, then Coverity is happy.

ACK with the adjustment.

John
     qemuMigrationCookieFree(mig);
     if (orig_err) {
         virSetError(orig_err);

On Thu, Sep 17, 2015 at 18:40:59 -0400, John Ferlan wrote:
On 09/11/2015 09:26 AM, Jiri Denemark wrote:
When persistently migrating a domain to a destination host where the same domain already exists (i.e., it is persistent and shutdown at the destination), we would happily through away the original persistent
s/through/throw
definition without properly freeing it. And when updating the definition fails for some reason we don't properly revert to the original state leaving the domain broken.
In addition to fixing these issues, the patch also makes sure the domain definition parsed from a migration cookie is either used or freed.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---

Notes:
    Version 2:
    - new patch

 src/qemu/qemu_migration.c | 56 +++++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 19 deletions(-)
Ran using my Coverity checker...
One issue - qemuMigrationFinish can get to 'cleanup:' calling
qemuMigrationCookieGetPersistent when 'mig == NULL', from either the goto
in the "if (!qemuMigrationJobIsActive(vm...)" path or the
"if (!(mig = qemuMigrationEatCookie(driver, ..." path.
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index c761657..1d556eb 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
...
@@ -5796,6 +5813,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
     qemuMonitorSetDomainLog(priv->mon, -1);
     VIR_FREE(priv->origname);
     virDomainObjEndAPI(&vm);
+    virDomainDefFree(qemuMigrationCookieGetPersistent(mig));
If this has an "if (mig)" guard, then Coverity is happy.
Hmm, Coverity is right.

Jirka
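(The adjustment converged on here is a one-line guard at the call site; a sketch of the fix as agreed above, not quoted from the pushed commit:

    virDomainObjEndAPI(&vm);
    /* the cookie may never have been parsed on early-exit paths */
    if (mig)
        virDomainDefFree(qemuMigrationCookieGetPersistent(mig));
    qemuMigrationCookieFree(mig);

qemuMigrationCookieFree() evidently tolerates NULL, since the patch calls it unconditionally on the same paths, so only the steal-and-free needs the guard.)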

On Fri, Sep 11, 2015 at 15:26:02 +0200, Jiri Denemark wrote:
While hacking qemuMigrationFinish I found it pretty hard to follow, and the work revealed a few bugs (patches 3 to 5, and 8) in the code.
Version 2:
- rebased and review comments addressed
- new patch "qemu: Fix some corner cases in persistent migration"
Jiri Denemark (8):
  qemu: Split qemuMigrationFinish
  qemu: Simplify qemuMigrationFinish
  qemu: Don't fail migration on save status failure
  qemu: Kill domain when migration finish fails
  qemu: Don't report false errors in migration protocol v2
  qemuDomainEventQueue: Check if event is non-NULL
  qemu: Queue events in migration Finish phase ASAP
  qemu: Fix some corner cases in persistent migration
 src/qemu/qemu_blockjob.c  |   6 +-
 src/qemu/qemu_cgroup.c    |   3 +-
 src/qemu/qemu_domain.c    |   6 +-
 src/qemu/qemu_driver.c    |  87 +++++++-----------
 src/qemu/qemu_hotplug.c   |  26 +++---
 src/qemu/qemu_migration.c | 223 ++++++++++++++++++++++++----------------------
 src/qemu/qemu_process.c   |  72 +++++----------
 7 files changed, 187 insertions(+), 236 deletions(-)
Pushed with the suggested changes.

Jirka