On Wed, Apr 11, 2012 at 05:40:37PM -0600, Eric Blake wrote:
Without the VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC flag, libvirt will internally
poll using qemu's "query-block-jobs" API and will not return until the
operation has been completed. API users are advised that this operation
is unbounded and further interaction with the domain during this period
may block. Future patches may refactor things to allow other queries in
parallel with this polling. For older qemu, we synthesize the cancellation
event, since qemu won't generate it.
Signed-off-by: Adam Litke <agl(a)us.ibm.com>
Cc: Stefan Hajnoczi <stefanha(a)gmail.com>
Signed-off-by: Eric Blake <eblake(a)redhat.com>
---
src/qemu/qemu_driver.c | 75 ++++++++++++++++++++++++++++++++++++-----
src/qemu/qemu_monitor_json.c | 45 +++++++++++++++++++++----
2 files changed, 103 insertions(+), 17 deletions(-)
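(Side note, not part of the patch: for API users the behaviour described in
the commit message boils down to something like the sketch below;
abort_block_job is just an illustrative helper name.)

/* Illustrative sketch only: contrast the default synchronous abort with
 * the new VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC flag. */
#include <stdbool.h>
#include <libvirt/libvirt.h>

static int
abort_block_job(virDomainPtr dom, const char *path, bool async)
{
    if (!async) {
        /* Default behaviour after this patch: libvirt polls
         * query-block-jobs internally and only returns once the job
         * is really gone. */
        return virDomainBlockJobAbort(dom, path, 0);
    }

    /* With the flag, the call returns as soon as qemu accepts the
     * cancellation; completion is reported later through the
     * VIR_DOMAIN_EVENT_ID_BLOCK_JOB event. */
    return virDomainBlockJobAbort(dom, path,
                                  VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC);
}

In other words, callers passing the flag are expected to wait for the block
job event themselves.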
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f939a31..bb6089b 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -11599,7 +11599,7 @@ cleanup:
static int
qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
unsigned long bandwidth, virDomainBlockJobInfoPtr info,
- int mode)
+ int mode, unsigned int flags)
{
struct qemud_driver *driver = dom->conn->privateData;
virDomainObjPtr vm = NULL;
@@ -11608,6 +11608,9 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
char *device = NULL;
int ret = -1;
bool async = false;
+ virDomainEventPtr event = NULL;
+ int idx;
+ virDomainDiskDefPtr disk;
qemuDriverLock(driver);
virUUIDFormat(dom->uuid, uuidstr);
@@ -11631,10 +11634,10 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
goto cleanup;
}
- device = qemuDiskPathToAlias(vm, path, NULL);
- if (!device) {
+ device = qemuDiskPathToAlias(vm, path, &idx);
+ if (!device)
goto cleanup;
- }
+ disk = vm->def->disks[idx];
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
@@ -11652,6 +11655,54 @@ qemuDomainBlockJobImpl(virDomainPtr dom, const char *path, const char *base,
ret = qemuMonitorBlockJob(priv->mon, device, base, bandwidth, info, mode,
async);
qemuDomainObjExitMonitorWithDriver(driver, vm);
+ if (ret < 0)
+ goto endjob;
+
+ /* With synchronous block cancel, we must synthesize an event, and
+ * we silently ignore the ABORT_ASYNC flag. With asynchronous
+ * block cancel, the event will come from qemu, but without the
+ * ABORT_ASYNC flag, we must block to guarantee synchronous
+ * operation. We do the waiting while still holding the VM job,
+ * to prevent newly scheduled block jobs from confusing us.
+ */
+ if (mode == BLOCK_JOB_ABORT) {
+ if (!async) {
+ int type = VIR_DOMAIN_BLOCK_JOB_TYPE_PULL;
+ int status = VIR_DOMAIN_BLOCK_JOB_CANCELED;
+ event = virDomainEventBlockJobNewFromObj(vm, disk->src, type,
+ status);
+ } else if (!(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
+ while (1) {
+ /* Poll every 50ms */
+ static struct timespec ts = { .tv_sec = 0,
+ .tv_nsec = 50 * 1000 * 1000ull };
+ virDomainBlockJobInfo dummy;
+
+ qemuDomainObjEnterMonitorWithDriver(driver, vm);
+ ret = qemuMonitorBlockJob(priv->mon, device, NULL, 0, &dummy,
+ BLOCK_JOB_INFO, async);
+ qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+ if (ret <= 0)
+ break;
+
+ virDomainObjUnlock(vm);
+ qemuDriverUnlock(driver);
+
+ nanosleep(&ts, NULL);
Okay, I was wondering how to justify the arbitrary 50ms; we use the
same value during migration for potential user input. It's slightly over
the human perception delay, but for this kind of operation that
sounds fine...
+ qemuDriverLock(driver);
+ virDomainObjLock(vm);
+
+ if (!virDomainObjIsActive(vm)) {
+ qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("domain is not running"));
+ ret = -1;
+ break;
+ }
+ }
+ }
+ }
endjob:
if (qemuDomainObjEndJob(driver, vm) == 0) {
@@ -11663,6 +11714,8 @@ cleanup:
VIR_FREE(device);
if (vm)
virDomainObjUnlock(vm);
+ if (event)
+ qemuDomainEventQueue(driver, event);
qemuDriverUnlock(driver);
return ret;
}
@@ -11670,8 +11723,9 @@ cleanup:
static int
qemuDomainBlockJobAbort(virDomainPtr dom, const char *path, unsigned int flags)
{
- virCheckFlags(0, -1);
- return qemuDomainBlockJobImpl(dom, path, NULL, 0, NULL, BLOCK_JOB_ABORT);
+ virCheckFlags(VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC, -1);
+ return qemuDomainBlockJobImpl(dom, path, NULL, 0, NULL, BLOCK_JOB_ABORT,
+ flags);
}
static int
@@ -11679,7 +11733,8 @@ qemuDomainGetBlockJobInfo(virDomainPtr dom, const char *path,
virDomainBlockJobInfoPtr info, unsigned int flags)
{
virCheckFlags(0, -1);
- return qemuDomainBlockJobImpl(dom, path, NULL, 0, info, BLOCK_JOB_INFO);
+ return qemuDomainBlockJobImpl(dom, path, NULL, 0, info, BLOCK_JOB_INFO,
+ flags);
}
static int
@@ -11688,7 +11743,7 @@ qemuDomainBlockJobSetSpeed(virDomainPtr dom, const char *path,
{
virCheckFlags(0, -1);
return qemuDomainBlockJobImpl(dom, path, NULL, bandwidth, NULL,
- BLOCK_JOB_SPEED);
+ BLOCK_JOB_SPEED, flags);
}
static int
@@ -11699,10 +11754,10 @@ qemuDomainBlockRebase(virDomainPtr dom, const char *path, const char *base,
virCheckFlags(0, -1);
ret = qemuDomainBlockJobImpl(dom, path, base, bandwidth, NULL,
- BLOCK_JOB_PULL);
+ BLOCK_JOB_PULL, flags);
if (ret == 0 && bandwidth != 0)
ret = qemuDomainBlockJobImpl(dom, path, NULL, bandwidth, NULL,
- BLOCK_JOB_SPEED);
+ BLOCK_JOB_SPEED, flags);
return ret;
}
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index f7d9ef2..242889c 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -58,13 +58,14 @@ static void qemuMonitorJSONHandleIOError(qemuMonitorPtr mon, virJSONValuePtr dat
static void qemuMonitorJSONHandleVNCConnect(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleVNCInitialize(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleVNCDisconnect(qemuMonitorPtr mon, virJSONValuePtr data);
-static void qemuMonitorJSONHandleBlockJob(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleSPICEConnect(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleSPICEInitialize(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleSPICEDisconnect(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandleTrayChange(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandlePMWakeup(qemuMonitorPtr mon, virJSONValuePtr data);
static void qemuMonitorJSONHandlePMSuspend(qemuMonitorPtr mon, virJSONValuePtr data);
+static void qemuMonitorJSONHandleBlockJobCompleted(qemuMonitorPtr mon, virJSONValuePtr data);
+static void qemuMonitorJSONHandleBlockJobCanceled(qemuMonitorPtr mon, virJSONValuePtr data);
typedef struct {
const char *type;
@@ -72,7 +73,8 @@ typedef struct {
} qemuEventHandler;
static qemuEventHandler eventHandlers[] = {
{ "BLOCK_IO_ERROR", qemuMonitorJSONHandleIOError, },
- { "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJob, },
+ { "BLOCK_JOB_CANCELLED", qemuMonitorJSONHandleBlockJobCanceled, },
+ { "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJobCompleted, },
{ "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
{ "POWERDOWN", qemuMonitorJSONHandlePowerdown, },
{ "RESET", qemuMonitorJSONHandleReset, },
@@ -762,13 +764,15 @@ static void qemuMonitorJSONHandleSPICEDisconnect(qemuMonitorPtr mon, virJSONValu
qemuMonitorJSONHandleGraphics(mon, data, VIR_DOMAIN_EVENT_GRAPHICS_DISCONNECT);
}
-static void qemuMonitorJSONHandleBlockJob(qemuMonitorPtr mon, virJSONValuePtr data)
+static void
+qemuMonitorJSONHandleBlockJobImpl(qemuMonitorPtr mon,
+ virJSONValuePtr data,
+ int event)
{
const char *device;
const char *type_str;
int type = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
unsigned long long offset, len;
- int status = VIR_DOMAIN_BLOCK_JOB_FAILED;
if ((device = virJSONValueObjectGetString(data, "device")) == NULL) {
VIR_WARN("missing device in block job event");
@@ -793,11 +797,22 @@ static void qemuMonitorJSONHandleBlockJob(qemuMonitorPtr mon, virJSONValuePtr da
if (STREQ(type_str, "stream"))
type = VIR_DOMAIN_BLOCK_JOB_TYPE_PULL;
- if (offset != 0 && offset == len)
- status = VIR_DOMAIN_BLOCK_JOB_COMPLETED;
+ switch ((virConnectDomainEventBlockJobStatus) event) {
+ case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
+ /* Make sure the whole device has been processed */
+ if (offset != len)
+ event = VIR_DOMAIN_BLOCK_JOB_FAILED;
+ break;
+ case VIR_DOMAIN_BLOCK_JOB_CANCELED:
+ break;
+ case VIR_DOMAIN_BLOCK_JOB_FAILED:
+ case VIR_DOMAIN_BLOCK_JOB_LAST:
+ VIR_DEBUG("should not get here");
+ break;
+ }
out:
- qemuMonitorEmitBlockJob(mon, device, type, status);
+ qemuMonitorEmitBlockJob(mon, device, type, event);
}
static void
@@ -840,6 +855,22 @@ qemuMonitorJSONHandlePMSuspend(qemuMonitorPtr mon,
qemuMonitorEmitPMSuspend(mon);
}
+static void
+qemuMonitorJSONHandleBlockJobCompleted(qemuMonitorPtr mon,
+ virJSONValuePtr data)
+{
+ qemuMonitorJSONHandleBlockJobImpl(mon, data,
+ VIR_DOMAIN_BLOCK_JOB_COMPLETED);
+}
+
+static void
+qemuMonitorJSONHandleBlockJobCanceled(qemuMonitorPtr mon,
+ virJSONValuePtr data)
+{
+ qemuMonitorJSONHandleBlockJobImpl(mon, data,
+ VIR_DOMAIN_BLOCK_JOB_CANCELED);
+}
+
int
qemuMonitorJSONHumanCommandWithFd(qemuMonitorPtr mon,
const char *cmd_str,
ACK. I was wondering how we could test for those events, but since this
requires an actual qemu-kvm process, we really can't implement it as
part of the existing python event test program.
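For poking at it by hand, something along these lines should do; a rough
sketch against the C API, assuming a running qemu domain on qemu:///system
with a block job started separately (e.g. virsh blockpull), and
block_job_cb is just a made-up name:

/* Manual-test sketch: register for block job events and print whatever
 * status arrives. */
#include <stdio.h>
#include <libvirt/libvirt.h>

static void
block_job_cb(virConnectPtr conn, virDomainPtr dom, const char *disk,
             int type, int status, void *opaque)
{
    printf("block job on %s/%s: type=%d status=%d\n",
           virDomainGetName(dom), disk, type, status);
}

int
main(void)
{
    virConnectPtr conn;

    if (virEventRegisterDefaultImpl() < 0)
        return 1;
    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;

    virConnectDomainEventRegisterAny(conn, NULL,
                                     VIR_DOMAIN_EVENT_ID_BLOCK_JOB,
                                     VIR_DOMAIN_EVENT_CALLBACK(block_job_cb),
                                     NULL, NULL);

    while (virEventRunDefaultImpl() >= 0)
        ; /* pump the event loop until interrupted */

    virConnectClose(conn);
    return 0;
}

Not something we can wire into make check, but enough to eyeball the
CANCELLED/COMPLETED events by hand.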
Daniel
--
Daniel Veillard      | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
daniel(a)veillard.com | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/