It looks simpler to drop this optimization: we are going to start
fetching disk stats during migration by querying the qemu process,
and with that the check for whether we have to acquire the job
condition becomes more complicated.
---
src/qemu/qemu_driver.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
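
Not part of the patch: below is a standalone sketch of the control flow
that results from this change, for review context only. The helpers
begin_query_job(), end_query_job() and fetch_migration_stats() are
hypothetical stand-ins for qemuDomainObjBeginJob(), qemuDomainObjEndJob()
and qemuMigrationFetchMigrationStats() from the diff; the point is that
the query job is now acquired unconditionally and only the actual stats
fetch stays gated on migration events and job state.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the QEMU_DOMAIN_JOB_STATUS_* values used in the diff. */
typedef enum {
    JOB_STATUS_ACTIVE,
    JOB_STATUS_MIGRATING,
} JobStatus;

/* Stubs standing in for the real begin/end-job and stats-fetch helpers. */
static int begin_query_job(void) { puts("begin QEMU_JOB_QUERY"); return 0; }
static void end_query_job(void) { puts("end QEMU_JOB_QUERY"); }
static int fetch_migration_stats(void) { puts("query qemu for stats"); return 0; }

/* After the patch: the query job is always acquired, and only the stats
 * fetch is conditional on migration events being supported and the job
 * having progressed past the initial ACTIVE state. */
static int get_job_stats(bool events, JobStatus status)
{
    int ret = -1;

    if (begin_query_job() < 0)
        return -1;

    if (events && status != JOB_STATUS_ACTIVE &&
        fetch_migration_stats() < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    end_query_job();   /* unconditional now, no "if (fetch)" guard */
    return ret;
}

int main(void)
{
    return get_job_stats(true, JOB_STATUS_MIGRATING) < 0;
}
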
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b592abe..bb7a64d 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12741,7 +12741,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
                               qemuDomainJobInfoPtr jobInfo)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
-    bool fetch = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
+    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
     int ret = -1;
 
     if (completed) {
@@ -12761,12 +12761,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
         return -1;
     }
 
-    /* Do not ask QEMU if migration is not even running yet */
-    if (!priv->job.current ||
-        priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE)
-        fetch = false;
-
-    if (fetch && qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
         return -1;
 
     if (!virDomainObjIsActive(vm)) {
@@ -12785,7 +12780,8 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
     if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE ||
         jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING ||
         jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
-        if (fetch &&
+
+        if (events && jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
             qemuMigrationFetchMigrationStats(driver, vm, QEMU_ASYNC_JOB_NONE,
                                              &jobInfo->stats, false) < 0)
             goto cleanup;
@@ -12797,8 +12793,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver,
     ret = 0;
 
  cleanup:
-    if (fetch)
-        qemuDomainObjEndJob(driver, vm);
+    qemuDomainObjEndJob(driver, vm);
 
     return ret;
 }
--
1.8.3.1