When QEMU reported a failed or canceled migration, we correctly detected
it but didn't treat it as an error condition and the migration protocol
just went on. Luckily, some of the subsequent steps eventually failed
and we reported an (unrelated and mostly random) error back to the
caller.
---
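Note for reviewers: the fix boils down to a presume-failure pattern.
Right before the status switch, ret is re-armed to -1 and only set to 0
for states we explicitly treat as OK, so failed, canceled, or
unrecognized states propagate as errors. The original code initialized
ret once at the top, but the monitor calls overwrote it with 0 on
success, so cases that never touched ret silently reported success.
Below is a minimal standalone sketch of the pattern; the MIG_* names,
messages, and update_job_status() are made up for illustration and are
not the real QEMU_MONITOR_MIGRATION_STATUS_* handling beyond what the
diff shows.

#include <stdio.h>

/* Hypothetical stand-ins for the QEMU migration states. */
enum migration_status {
    MIG_INACTIVE,   /* migration never started or already gone */
    MIG_ACTIVE,     /* data is still being transferred */
    MIG_COMPLETED,  /* all memory transferred, migration done */
    MIG_ERROR,      /* QEMU reported a failure */
    MIG_CANCELLED,  /* migration was aborted */
};

/* Presume failure: start from ret = -1 and set ret = 0 only for
 * statuses that are explicitly fine.  Failed, canceled, or unknown
 * statuses then propagate as an error instead of being silently
 * ignored by the caller's polling loop. */
static int
update_job_status(enum migration_status status)
{
    int ret = -1;

    switch (status) {
    case MIG_ACTIVE:
    case MIG_COMPLETED:
        ret = 0;
        break;
    case MIG_INACTIVE:
        fprintf(stderr, "migration is not active\n");
        break;
    case MIG_ERROR:
        fprintf(stderr, "migration unexpectedly failed\n");
        break;
    case MIG_CANCELLED:
        fprintf(stderr, "migration was canceled\n");
        break;
    }

    return ret;
}

int main(void)
{
    /* The caller now sees -1 for error/canceled states and can abort
     * the migration protocol instead of stumbling into a later,
     * unrelated failure. */
    printf("active:    %d\n", update_job_status(MIG_ACTIVE));
    printf("cancelled: %d\n", update_job_status(MIG_CANCELLED));
    return 0;
}

A side effect of initializing to -1 is that any status value the switch
does not recognize stays an error by default, which is the safe choice
if QEMU ever grows a new migration state.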
src/qemu/qemu_migration.c | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 1bee66a..43e56b7 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -878,38 +878,39 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
 static int
 qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
                              virDomainObjPtr vm,
                              const char *job,
                              enum qemuDomainAsyncJob asyncJob)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    int ret = -1;
+    int ret;
     int status;
     unsigned long long memProcessed;
     unsigned long long memRemaining;
     unsigned long long memTotal;
 
     ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
     if (ret < 0) {
         /* Guest already exited; nothing further to update. */
         return -1;
     }
 
     ret = qemuMonitorGetMigrationStatus(priv->mon,
                                         &status,
                                         &memProcessed,
                                         &memRemaining,
                                         &memTotal);
     qemuDomainObjExitMonitorWithDriver(driver, vm);
 
     if (ret < 0 || virTimeMillisNow(&priv->job.info.timeElapsed) < 0) {
         priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
         return -1;
     }
     priv->job.info.timeElapsed -= priv->job.start;
 
+    ret = -1;
     switch (status) {
     case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
         priv->job.info.type = VIR_DOMAIN_JOB_NONE;
         qemuReportError(VIR_ERR_OPERATION_FAILED,
                         _("%s: %s"), job, _("is not active"));
         break;
--
1.7.8.5