When libvirtd is restarted during an active outgoing migration (or a
snapshot, save, or dump, which are internally implemented as
migration), it needs to cancel the migration. But due to a mistake in
commit v8.7.0-57-g2d7b22b561, qemuMigrationSrcCancel is called with
wait == true, which leads to an instant crash caused by dereferencing
a NULL pointer stored in priv->job.current.
When canceling a migration to file (snapshot, save, dump), we don't
need to wait until it is really canceled, as no migration capabilities
or parameters need to be restored.
On the other hand, we do need to wait when canceling an outgoing
migration, and since we don't have virDomainJobData at this point, we
have to temporarily restore the migration job to make sure we can
process MIGRATION events from QEMU.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
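Note for reviewers (not part of the patch): below is a minimal,
standalone C sketch of the pattern the fix relies on. The type and
function names (Job, JobData, cancelMigration) are hypothetical
stand-ins, not libvirt structures; the sketch only illustrates why
waiting for cancellation needs per-job data to be present, while the
migration-to-file paths can simply skip the wait.

/* Standalone illustration; compiles with any C99 compiler. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    int status;                 /* simplified per-job progress data */
} JobData;

typedef struct {
    JobData *current;           /* NULL until the async job is restored */
} Job;

/* Ask the (pretend) hypervisor to cancel; optionally wait for the result. */
static bool
cancelMigration(Job *job, bool wait)
{
    printf("migrate_cancel sent\n");

    if (!wait)
        return true;            /* migration to file: nothing to restore */

    /* Waiting needs job->current; the crash described above corresponds
     * to reaching this point with job->current == NULL and dereferencing
     * it without a check. */
    if (!job->current) {
        fprintf(stderr, "cannot wait: no job data\n");
        return false;
    }
    return job->current->status == 0;
}

int
main(void)
{
    Job job = { .current = NULL };
    JobData restored = { .status = 0 };

    /* save/dump/snapshot recovery: send the cancel, don't wait */
    cancelMigration(&job, false);

    /* outgoing migration recovery: restore the job data first, then wait */
    job.current = &restored;
    cancelMigration(&job, true);

    return 0;
}
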
src/qemu/qemu_migration.c | 28 +++++++++++++++++++++++-----
src/qemu/qemu_migration.h | 3 ++-
src/qemu/qemu_process.c | 4 ++--
3 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 21c870334d..76e486fbc7 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -4633,8 +4633,7 @@ qemuMigrationSrcIsCanceled(virDomainObj *vm)
* cancellation to complete.
*
* The thread (the caller itself in most cases) which is watching the migration
- * will do all the cleanup once migration is canceled. If no thread is watching
- * the migration, use qemuMigrationSrcCancelUnattended instead.
+ * will do all the cleanup once migration is canceled.
*/
int
qemuMigrationSrcCancel(virDomainObj *vm,
@@ -6979,11 +6978,12 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
/**
- * This function is supposed to be used only when no other thread is watching
- * the migration.
+ * This function is supposed to be used only while reconnecting to a domain
+ * with an active migration job.
*/
int
-qemuMigrationSrcCancelUnattended(virDomainObj *vm)
+qemuMigrationSrcCancelUnattended(virDomainObj *vm,
+ virDomainJobObj *oldJob)
{
bool storage = false;
size_t i;
@@ -6991,8 +6991,26 @@ qemuMigrationSrcCancelUnattended(virDomainObj *vm)
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
vm->def->name);
+ /* Make sure MIGRATION event handler can store the current migration state
+ * in the job.
+ */
+ if (!vm->job->current) {
+ qemuDomainObjRestoreAsyncJob(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
+ oldJob->phase, oldJob->asyncStarted,
+ VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT,
+ QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
+ VIR_DOMAIN_JOB_STATUS_FAILED,
+ VIR_JOB_NONE);
+ }
+
+ /* We're inside a MODIFY job and the restored MIGRATION_OUT async job is
+ * used only for processing migration events from QEMU. Thus we don't want
+ * to start a nested job for talking to QEMU.
+ */
qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, true);
+ virDomainObjEndAsyncJob(vm);
+
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDef *disk = vm->def->disks[i];
qemuDomainDiskPrivate *diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index fbea45ad4e..3d7c2702aa 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -241,7 +241,8 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
int
-qemuMigrationSrcCancelUnattended(virDomainObj *vm);
+qemuMigrationSrcCancelUnattended(virDomainObj *vm,
+ virDomainJobObj *oldJob);
int
qemuMigrationSrcCancel(virDomainObj *vm,
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index e6fd9395de..a8101e1233 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3532,7 +3532,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
*/
VIR_DEBUG("Cancelling unfinished migration of domain %s",
vm->def->name);
- if (qemuMigrationSrcCancelUnattended(vm) < 0) {
+ if (qemuMigrationSrcCancelUnattended(vm, job) < 0) {
VIR_WARN("Could not cancel ongoing migration of domain %s",
vm->def->name);
}
@@ -3691,7 +3691,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
case VIR_ASYNC_JOB_SAVE:
case VIR_ASYNC_JOB_DUMP:
case VIR_ASYNC_JOB_SNAPSHOT:
- qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, true);
+ qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, false);
/* resume the domain but only if it was paused as a result of
* running a migration-to-file operation. Although we are
* recovering an async job, this function is run at startup
--
2.38.0