Currently, domain save, dump, managed save, and migration all use
the same function, "qemuDomainWaitForMigrationComplete", to wait for
the job to finish, but the error messages all talk about "migration".
For example, when a domain save job is cancelled by the user,
"Migration was cancelled by client" is reported, which is confusing
for the user.

As a solution, introduce two new job types (QEMU_JOB_SAVE and
QEMU_JOB_DUMP), and set "priv->jobActive" to QEMU_JOB_SAVE before
saving and to QEMU_JOB_DUMP before dumping, so that
"qemuDomainWaitForMigrationComplete" knows the real job type and can
report a clearer message.
* src/qemu/qemu_driver.c: introduce QEMU_JOB_SAVE and QEMU_JOB_DUMP,
  and use the active job type to produce job-specific messages.
---
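For illustration, here is a minimal standalone sketch of the mapping
this patch adds. The enum values and message strings come from the
diff below; jobName() and main() are only scaffolding for this
example, not code from the patch:

    #include <stdio.h>

    /* Subset of enum qemuDomainJob from qemu_driver.c, including the
     * two values added by this patch; unrelated values are omitted. */
    enum qemuDomainJob {
        QEMU_JOB_UNSPECIFIED,
        QEMU_JOB_MIGRATION_OUT,
        QEMU_JOB_MIGRATION_IN,
        QEMU_JOB_SAVE,      /* new: set before a save job */
        QEMU_JOB_DUMP,      /* new: set before a core dump job */
    };

    /* Same mapping as the switch added to
     * qemuDomainWaitForMigrationComplete below. */
    static const char *jobName(enum qemuDomainJob jobActive)
    {
        switch (jobActive) {
        case QEMU_JOB_MIGRATION_OUT:
            return "migration";
        case QEMU_JOB_SAVE:
            return "domain saving";
        case QEMU_JOB_DUMP:
            return "domain core dump";
        default:
            return "job";
        }
    }

    int main(void)
    {
        /* A cancelled job is now reported against the right job type,
         * e.g. "domain saving was cancelled by client" instead of
         * "Migration was cancelled by client". */
        printf("%s was cancelled by client\n", jobName(QEMU_JOB_SAVE));
        printf("%s was cancelled by client\n", jobName(QEMU_JOB_DUMP));
        printf("%s was cancelled by client\n",
               jobName(QEMU_JOB_MIGRATION_OUT));
        return 0;
    }
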
src/qemu/qemu_driver.c | 39 ++++++++++++++++++++++++++++++---------
1 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 19ce9a6..aed48f4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -104,6 +104,8 @@ enum qemuDomainJob {
QEMU_JOB_UNSPECIFIED,
QEMU_JOB_MIGRATION_OUT,
QEMU_JOB_MIGRATION_IN,
+ QEMU_JOB_SAVE,
+ QEMU_JOB_DUMP,
};
enum qemuDomainJobSignals {
@@ -5389,21 +5391,36 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
struct timeval now;
int rc;
+ const char *job;
+
+ switch (priv->jobActive) {
+ case QEMU_JOB_MIGRATION_OUT:
+ job = "migration";
+ break;
+ case QEMU_JOB_SAVE:
+ job = "domain saving";
+ break;
+ case QEMU_JOB_DUMP:
+ job = "domain core dump";
+ break;
+ default:
+ job = "job";
+ }
if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit during migration"));
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("guest unexpectedly quit during %s"), job);
goto cleanup;
}
if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
- VIR_DEBUG0("Cancelling migration at client request");
+ VIR_DEBUG("Cancelling %s at client request", job);
qemuDomainObjEnterMonitorWithDriver(driver, vm);
rc = qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
if (rc < 0) {
- VIR_WARN0("Unable to cancel migration");
+ VIR_WARN("Unable to cancel %s", job);
}
} else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
@@ -5427,8 +5444,8 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
* guest to die
*/
if (!virDomainObjIsActive(vm)) {
- qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
- _("guest unexpectedly quit during migration"));
+ qemuReportError(VIR_ERR_INTERNAL_ERROR,
+ _("guest unexpectedly quit during %s"), job);
goto cleanup;
}
@@ -5459,7 +5476,7 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("Migration is not active"));
+ _("%s is not active"), job);
break;
case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
@@ -5480,13 +5497,13 @@ qemuDomainWaitForMigrationComplete(struct qemud_driver *driver, virDomainObjPtr
case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("Migration unexpectedly
failed"));
+ _("%s unexpectedly failed"), job);
break;
case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
qemuReportError(VIR_ERR_OPERATION_FAILED,
- "%s", _("Migration was cancelled by
client"));
+ _("%s was cancelled by client"), job);
break;
}
@@ -5606,6 +5623,8 @@ static int qemudDomainSaveFlag(struct qemud_driver *driver, virDomainPtr dom,
goto endjob;
}
+ priv->jobActive = QEMU_JOB_SAVE;
+
memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
@@ -6198,6 +6217,8 @@ static int qemudDomainCoreDump(virDomainPtr dom,
goto endjob;
}
+ priv->jobActive = QEMU_JOB_DUMP;
+
/* Migrate will always stop the VM, so the resume condition is
independent of whether the stop command is issued. */
resume = (vm->state == VIR_DOMAIN_RUNNING);
--
1.7.3.2