It looks simpler to drop this optimization: we are going to fetch
disk stats during migration by querying the qemu process, and the
condition that decides whether we have to acquire a job becomes more
complicated.
---
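For reference, a minimal self-contained sketch of the control flow after
this change (hypothetical stub names, not the real libvirt helpers): the
query job is acquired unconditionally, and only the monitor fetch is
gated on the migration events capability and on the job having moved
past QEMU_DOMAIN_JOB_STATUS_ACTIVE.

    #include <stdbool.h>

    /* Hypothetical stand-ins for qemuDomainObjBeginJob()/EndJob() and
     * qemuMigrationFetchStats(); the real signatures differ. */
    static bool begin_query_job(void) { return true; }
    static void end_job(void) { }
    static bool fetch_stats_from_qemu(void) { return true; }

    enum job_status { JOB_ACTIVE, JOB_MIGRATING, JOB_POSTCOPY };

    static int
    get_job_stats(bool events, enum job_status status)
    {
        int ret = -1;

        /* The query job is now taken unconditionally ... */
        if (!begin_query_job())
            return -1;

        /* ... and only the qemu query itself is skipped when the events
         * capability is missing or migration has not actually started. */
        if (events && status != JOB_ACTIVE && !fetch_stats_from_qemu())
            goto cleanup;

        ret = 0;

     cleanup:
        end_job();
        return ret;
    }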
src/qemu/qemu_driver.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 17ccd9e..11226b1 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12985,7 +12985,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
qemuDomainJobInfoPtr jobInfo)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- bool fetch = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
+ bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
int ret = -1;
if (completed) {
@@ -13004,12 +13004,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
return -1;
}
- /* Do not ask QEMU if migration is not even running yet */
- if (!priv->job.current ||
- priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE)
- fetch = false;
-
- if (fetch && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
return -1;
if (!virDomainObjIsActive(vm)) {
@@ -13028,7 +13023,8 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
- if (fetch &&
+ if (events &&
+ jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
qemuMigrationFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE, jobInfo) < 0)
goto cleanup;
@@ -13039,8 +13035,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
ret = 0;
cleanup:
- if (fetch)
- qemuDomainObjEndJob(driver, vm);
+ qemuDomainObjEndJob(driver, vm);
return ret;
}
--
1.8.3.1