References:
- https://www.redhat.com/archives/libvir-list/2011-May/msg00210.html
- https://www.redhat.com/archives/libvir-list/2011-May/msg00287.html
---
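A note on the mechanism, for reviewers: during an outgoing migration or a save job the monitor is owned by the job, so qemudDomainBlockStats and qemuDomainGetBlockInfo can no longer start their own job. Instead they post a request through priv->jobSignals / priv->jobSignalsData and sleep on the new priv->signalCond; the polling loop in qemuMigrationWaitForCompletion services the request on its next iteration, stores the return code, clears the signal bit and wakes the waiter. The following is only a rough standalone sketch of that handoff, using plain pthread primitives instead of virCond/virMutex; fake_monitor_query() is a made-up stand-in for the qemuMonitorGetBlockStatsInfo() call and not part of this patch.

/*
 * Standalone sketch of the request/acknowledge handoff used by this patch
 * (plain pthreads instead of virCond/virMutex; fake_monitor_query() is a
 * hypothetical stand-in for the monitor query).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define SIGNAL_BLKSTAT (1 << 4)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;     /* role of vm->lock */
static pthread_cond_t signalCond = PTHREAD_COND_INITIALIZER; /* priv->signalCond */
static unsigned int jobSignals;                              /* priv->jobSignals */
static int returnCode = -1;                                  /* jobSignalsData.returnCode */
static long long blockStat;                                  /* jobSignalsData.blockStat */
static bool jobRunning = true;

static int fake_monitor_query(long long *out)
{
    *out = 42;            /* pretend this came from the monitor */
    return 0;
}

/* Query side: mirrors the new branch in qemudDomainBlockStats(). */
static void *query_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);

    while (jobSignals)                        /* wait for a free request slot */
        pthread_cond_wait(&signalCond, &lock);

    jobSignals |= SIGNAL_BLKSTAT;             /* post the request */
    returnCode = -1;

    while (jobSignals & SIGNAL_BLKSTAT)       /* wait until it is serviced */
        pthread_cond_wait(&signalCond, &lock);

    printf("blkstat rc=%d value=%lld\n", returnCode, blockStat);
    jobRunning = false;
    pthread_mutex_unlock(&lock);
    return NULL;
}

/* Job side: mirrors the polling loop in qemuMigrationWaitForCompletion(). */
static void *job_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (jobRunning) {
        if (jobSignals & SIGNAL_BLKSTAT) {
            returnCode = fake_monitor_query(&blockStat);
            jobSignals ^= SIGNAL_BLKSTAT;     /* request done */
        }
        pthread_cond_signal(&signalCond);     /* wake any waiting API thread */
        pthread_mutex_unlock(&lock);
        usleep(50 * 1000);                    /* periodic migration status poll */
        pthread_mutex_lock(&lock);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t q, j;
    pthread_create(&j, NULL, job_thread, NULL);
    pthread_create(&q, NULL, query_thread, NULL);
    pthread_join(q, NULL);
    pthread_join(j, NULL);
    return 0;
}

The property this preserves is that while a migration or save job is active the monitor is only ever driven from the job loop; the API threads never enter the monitor themselves, they just queue work and wait.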
src/qemu/qemu_domain.c | 6 +++
src/qemu/qemu_domain.h | 7 ++++
src/qemu/qemu_driver.c | 86 +++++++++++++++++++++++++++++++--------------
src/qemu/qemu_migration.c | 31 ++++++++++++++++
4 files changed, 103 insertions(+), 27 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index c61f9bf..d4e53c4 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -526,6 +526,12 @@ int qemuDomainObjBeginJob(virDomainObjPtr obj)
priv->jobStart = timeval_to_ms(now);
memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
+ if (virCondInit(&priv->signalCond) < 0) {
+ virReportSystemError(errno,
+ "%s", _("cannot initialize signal
condition"));
+ return -1;
+ }
+
return 0;
}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 6d24f53..b82986c 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -47,11 +47,17 @@ enum qemuDomainJobSignals {
QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change */
QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
+ QEMU_JOB_SIGNAL_BLKSTAT = 1 << 4, /* Request blkstat during migration */
+ QEMU_JOB_SIGNAL_BLKINFO = 1 << 5, /* Request blkinfo during migration */
};
struct qemuDomainJobSignalsData {
unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
unsigned long migrateBandwidth; /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
+ char *devname; /* Device name used by blkstat/blkinfo calls */
+ int returnCode; /* Return code for the blkstat/blkinfo calls */
+ virDomainBlockStatsPtr blockStat; /* Block statistics for QEMU_JOB_SIGNAL_BLKSTAT */
+ virDomainBlockInfoPtr blockInfo; /* Block information for QEMU_JOB_SIGNAL_BLKINFO */
};
typedef struct _qemuDomainPCIAddressSet qemuDomainPCIAddressSet;
@@ -61,6 +67,7 @@ typedef struct _qemuDomainObjPrivate qemuDomainObjPrivate;
typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr;
struct _qemuDomainObjPrivate {
virCond jobCond; /* Use in conjunction with main virDomainObjPtr lock */
+ virCond signalCond; /* Use in conjunction with main virDomainObjPtr lock */
enum qemuDomainJob jobActive; /* Currently running job */
unsigned int jobSignals; /* Signals for running job */
struct qemuDomainJobSignalsData jobSignalsData; /* Signal specific data */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 0fd0f10..f9f5e83 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5031,13 +5031,10 @@ qemudDomainBlockStats (virDomainPtr dom,
goto cleanup;
}
- if (qemuDomainObjBeginJob(vm) < 0)
- goto cleanup;
-
if (!virDomainObjIsActive(vm)) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
- goto endjob;
+ goto cleanup;
}
for (i = 0 ; i < vm->def->ndisks ; i++) {
@@ -5050,29 +5047,48 @@ qemudDomainBlockStats (virDomainPtr dom,
if (!disk) {
qemuReportError(VIR_ERR_INVALID_ARG,
_("invalid path: %s"), path);
- goto endjob;
+ goto cleanup;
}
if (!disk->info.alias) {
qemuReportError(VIR_ERR_INTERNAL_ERROR,
_("missing disk device alias name for %s"),
disk->dst);
- goto endjob;
+ goto cleanup;
}
priv = vm->privateData;
- qemuDomainObjEnterMonitor(vm);
- ret = qemuMonitorGetBlockStatsInfo(priv->mon,
- disk->info.alias,
- &stats->rd_req,
- &stats->rd_bytes,
- &stats->wr_req,
- &stats->wr_bytes,
- &stats->errs);
- qemuDomainObjExitMonitor(vm);
+ if ((priv->jobActive == QEMU_JOB_MIGRATION_OUT)
+ || (priv->jobActive == QEMU_JOB_SAVE)) {
-endjob:
- if (qemuDomainObjEndJob(vm) == 0)
- vm = NULL;
+ while (priv->jobSignals)
+ ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+ priv->jobSignals |= QEMU_JOB_SIGNAL_BLKSTAT;
+ priv->jobSignalsData.devname = disk->info.alias;
+ priv->jobSignalsData.blockStat = stats;
+ priv->jobSignalsData.returnCode = -1;
+
+ while (priv->jobSignals & QEMU_JOB_SIGNAL_BLKSTAT)
+ ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+ ret = priv->jobSignalsData.returnCode;
+ } else {
+ if (qemuDomainObjBeginJob(vm) < 0)
+ goto cleanup;
+
+ qemuDomainObjEnterMonitor(vm);
+ ret = qemuMonitorGetBlockStatsInfo(priv->mon,
+ disk->info.alias,
+ &stats->rd_req,
+ &stats->rd_bytes,
+ &stats->wr_req,
+ &stats->wr_bytes,
+ &stats->errs);
+ qemuDomainObjExitMonitor(vm);
+
+ if (qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+ }
cleanup:
if (vm)
@@ -5473,23 +5489,39 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
disk format and on a block device, then query
highest allocated extent from QEMU */
if (disk->type == VIR_DOMAIN_DISK_TYPE_BLOCK &&
- format != VIR_STORAGE_FILE_RAW &&
- S_ISBLK(sb.st_mode)) {
+ format != VIR_STORAGE_FILE_RAW && S_ISBLK(sb.st_mode)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuDomainObjBeginJob(vm) < 0)
- goto cleanup;
- if (!virDomainObjIsActive(vm))
+
+ if (!virDomainObjIsActive(vm)) {
ret = 0;
- else {
+ } else if ((priv->jobActive == QEMU_JOB_MIGRATION_OUT)
+ || (priv->jobActive == QEMU_JOB_SAVE)) {
+
+ while (priv->jobSignals)
+ ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+ priv->jobSignals |= QEMU_JOB_SIGNAL_BLKINFO;
+ priv->jobSignalsData.devname = disk->info.alias;
+ priv->jobSignalsData.blockInfo = info;
+ priv->jobSignalsData.returnCode = -1;
+
+ while (priv->jobSignals & QEMU_JOB_SIGNAL_BLKINFO)
+ ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+ ret = priv->jobSignalsData.returnCode;
+ } else {
+ if (qemuDomainObjBeginJob(vm) < 0)
+ goto cleanup;
+
qemuDomainObjEnterMonitor(vm);
ret = qemuMonitorGetBlockExtent(priv->mon,
disk->info.alias,
&info->allocation);
qemuDomainObjExitMonitor(vm);
- }
- if (qemuDomainObjEndJob(vm) == 0)
- vm = NULL;
+ if (qemuDomainObjEndJob(vm) == 0)
+ vm = NULL;
+ }
} else {
ret = 0;
}
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 6738a53..54e41f9 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -156,6 +156,34 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (rc < 0)
VIR_WARN0("Unable to set migration speed");
+ } else if (priv->jobSignals & QEMU_JOB_SIGNAL_BLKSTAT) {
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ rc = qemuMonitorGetBlockStatsInfo(priv->mon,
+ priv->jobSignalsData.devname,
+ &priv->jobSignalsData.blockStat->rd_req,
+ &priv->jobSignalsData.blockStat->rd_bytes,
+ &priv->jobSignalsData.blockStat->wr_req,
+ &priv->jobSignalsData.blockStat->wr_bytes,
+ &priv->jobSignalsData.blockStat->errs);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+ priv->jobSignalsData.returnCode = rc;
+ priv->jobSignals ^= QEMU_JOB_SIGNAL_BLKSTAT;
+
+ if (rc < 0)
+ VIR_WARN0("Unable to get block statistics");
+ } else if (priv->jobSignals & QEMU_JOB_SIGNAL_BLKINFO) {
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ rc = qemuMonitorGetBlockExtent(priv->mon,
+ priv->jobSignalsData.devname,
+ &priv->jobSignalsData.blockInfo->allocation);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+ priv->jobSignalsData.returnCode = rc;
+ priv->jobSignals ^= QEMU_JOB_SIGNAL_BLKINFO;
+
+ if (rc < 0)
+ VIR_WARN0("Unable to get block information");
}
/* Repeat check because the job signals might have caused
@@ -223,6 +251,8 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
break;
}
+ virCondSignal(&priv->signalCond);
+
virDomainObjUnlock(vm);
qemuDriverUnlock(driver);
@@ -233,6 +263,7 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
}
cleanup:
+ virCondBroadcast(&priv->signalCond);
return ret;
}
--
1.7.1