When the libvirt daemon is restarted during an active post-copy migration,
we no longer unconditionally mark the migration as broken. In this phase
libvirt is not really needed for the migration to finish successfully. In
fact, the migration could even have finished while libvirt was not running,
or it may still be happily running.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
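The change boils down to a three-way decision on the migration state freshly
queried from QEMU once libvirt reconnects to the domain. The standalone
sketch below only models that decision for illustration; the enum values and
the helper name are simplified stand-ins, not the actual libvirt internals.

/* Build with: cc -o recover recover.c */
#include <stdio.h>

/* Simplified stand-in for the virDomainJobStatus values the recovery code
 * cares about. */
typedef enum {
    JOB_STATUS_POSTCOPY,             /* post-copy migration still running */
    JOB_STATUS_HYPERVISOR_COMPLETED, /* QEMU already finished the migration */
    JOB_STATUS_FAILED,               /* anything else, including query errors */
} MigStatus;

/* Models the new branching in qemuProcessRecoverMigration() once the
 * pre-existing checks decided the domain should not be killed (rc > 0). */
static const char *
recoverPostcopy(MigStatus status, int outgoing)
{
    if (status == JOB_STATUS_POSTCOPY)
        return "still running: restore the job and handle it as unattended";

    if (status != JOB_STATUS_HYPERVISOR_COMPLETED)
        return outgoing ? "broken: qemuMigrationSrcPostcopyFailed()"
                        : "broken: qemuMigrationDstPostcopyFailed()";

    return outgoing ? "finished: qemuMigrationSrcComplete()"
                    : "finished: qemuMigrationDstComplete()";
}

int
main(void)
{
    printf("%s\n", recoverPostcopy(JOB_STATUS_POSTCOPY, 1));
    printf("%s\n", recoverPostcopy(JOB_STATUS_FAILED, 1));
    printf("%s\n", recoverPostcopy(JOB_STATUS_HYPERVISOR_COMPLETED, 0));
    return 0;
}
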
 src/qemu/qemu_migration.c | 27 +++++++++++++++++++++++++++
 src/qemu/qemu_migration.h |  6 ++++++
 src/qemu/qemu_process.c   | 39 +++++++++++++++++++++++++++++----------
 3 files changed, 62 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index dacea63610..854dfd43c1 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -2460,6 +2460,33 @@ qemuMigrationSrcBeginPhaseBlockDirtyBitmaps(qemuMigrationCookie *mig,
 }
 
 
+int
+qemuMigrationAnyRefreshStatus(virQEMUDriver *driver,
+                              virDomainObj *vm,
+                              virDomainAsyncJob asyncJob,
+                              virDomainJobStatus *status)
+{
+    g_autoptr(virDomainJobData) jobData = NULL;
+    qemuDomainJobDataPrivate *priv;
+
+    jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
+    priv = jobData->privateData;
+
+    if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, NULL) < 0)
+        return -1;
+
+    qemuMigrationUpdateJobType(jobData);
+    VIR_DEBUG("QEMU reports domain '%s' is in '%s' migration state, "
+              "translated as %d",
+              vm->def->name,
+              qemuMonitorMigrationStatusTypeToString(priv->stats.mig.status),
+              jobData->status);
+
+    *status = jobData->status;
+    return 0;
+}
+
+
 /* The caller is supposed to lock the vm and start a migration job. */
 static char *
 qemuMigrationSrcBeginPhase(virQEMUDriver *driver,
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index eeb69a52bf..9351d6ac51 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -279,3 +279,9 @@ qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
                                  virDomainObj *vm,
                                  virDomainAsyncJob asyncJob,
                                  virDomainJobData *jobData);
+
+int
+qemuMigrationAnyRefreshStatus(virQEMUDriver *driver,
+                              virDomainObj *vm,
+                              virDomainAsyncJob asyncJob,
+                              virDomainJobStatus *status);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 7b347a9061..1cb00af6f1 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3591,10 +3591,8 @@ qemuProcessRecoverMigrationIn(virQEMUDriver *driver,
         /* migration finished, we started resuming the domain but didn't
          * confirm success or failure yet; killing it seems safest unless
          * we already started guest CPUs or we were in post-copy mode */
-        if (virDomainObjIsPostcopy(vm, VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN)) {
-            qemuMigrationDstPostcopyFailed(vm);
+        if (virDomainObjIsPostcopy(vm, VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN))
             return 1;
-        }
 
         if (state != VIR_DOMAIN_RUNNING) {
             VIR_DEBUG("Killing migrated domain %s", vm->def->name);
@@ -3661,10 +3659,8 @@ qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
          * of Finish3 step; third party needs to check what to do next; in
          * post-copy mode we can use PAUSED_POSTCOPY_FAILED state for this
          */
-        if (postcopy) {
-            qemuMigrationSrcPostcopyFailed(vm);
+        if (postcopy)
             return 1;
-        }
         break;
 
     case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
@@ -3672,10 +3668,8 @@ qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
          * post-copy mode there's no way back, so let's just mark the domain
          * as broken in that case
          */
-        if (postcopy) {
-            qemuMigrationSrcPostcopyFailed(vm);
+        if (postcopy)
             return 1;
-        }
 
         VIR_DEBUG("Resuming domain %s after failed migration",
                   vm->def->name);
@@ -3713,6 +3707,7 @@ qemuProcessRecoverMigration(virQEMUDriver *driver,
                              qemuDomainJobObj *job,
                              unsigned int *stopFlags)
 {
+    virDomainJobStatus migStatus = VIR_DOMAIN_JOB_STATUS_NONE;
     qemuDomainJobPrivate *jobPriv = job->privateData;
     virDomainState state;
     int reason;
@@ -3720,6 +3715,8 @@ qemuProcessRecoverMigration(virQEMUDriver *driver,
 
     state = virDomainObjGetState(vm, &reason);
 
+    qemuMigrationAnyRefreshStatus(driver, vm, VIR_ASYNC_JOB_NONE, &migStatus);
+
     if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
         rc = qemuProcessRecoverMigrationOut(driver, vm, job,
                                             state, reason, stopFlags);
@@ -3731,7 +3728,29 @@ qemuProcessRecoverMigration(virQEMUDriver *driver,
         return -1;
 
     if (rc > 0) {
-        qemuProcessRestoreMigrationJob(vm, job);
+        if (migStatus == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
+            VIR_DEBUG("Post-copy migration of domain %s still running, it "
+                      "will be handled as unattended", vm->def->name);
+            qemuProcessRestoreMigrationJob(vm, job);
+            return 0;
+        }
+
+        if (migStatus != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED) {
+            if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
+                qemuMigrationSrcPostcopyFailed(vm);
+            else
+                qemuMigrationDstPostcopyFailed(vm);
+
+            qemuProcessRestoreMigrationJob(vm, job);
+            return 0;
+        }
+
+        VIR_DEBUG("Post-copy migration of domain %s already finished",
+                  vm->def->name);
+        if (job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
+            qemuMigrationSrcComplete(driver, vm, VIR_ASYNC_JOB_NONE);
+        else
+            qemuMigrationDstComplete(driver, vm, true, VIR_ASYNC_JOB_NONE, job);
         return 0;
     }
 
--
2.35.1