[libvirt] [PATCH] qemu: Implement DomainPMSuspendForDuration

via user agent.
---
 src/qemu/qemu_agent.c  |   31 +++++++++++++++++++
 src/qemu/qemu_agent.h  |    2 +
 src/qemu/qemu_driver.c |   78 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 111 insertions(+), 0 deletions(-)

diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c
index 9df5546..a17d025 100644
--- a/src/qemu/qemu_agent.c
+++ b/src/qemu/qemu_agent.c
@@ -1184,3 +1184,34 @@ cleanup:
     virJSONValueFree(reply);
     return ret;
 }
+
+VIR_ENUM_DECL(qemuAgentSuspendMode);
+
+VIR_ENUM_IMPL(qemuAgentSuspendMode,
+              VIR_NODE_SUSPEND_TARGET_LAST,
+              "guest-suspend-ram",
+              "guest-suspend-disk",
+              "guest-suspend-hybrid");
+
+int
+qemuAgentSuspend(qemuAgentPtr mon,
+                 unsigned int target)
+{
+    int ret = -1;
+    virJSONValuePtr cmd;
+    virJSONValuePtr reply = NULL;
+
+    cmd = qemuAgentMakeCommand(qemuAgentSuspendModeTypeToString(target),
+                               NULL);
+    if (!cmd)
+        return -1;
+
+    ret = qemuAgentCommand(mon, cmd, &reply);
+
+    if (ret == 0)
+        ret = qemuAgentCheckError(cmd, reply);
+
+    virJSONValueFree(cmd);
+    virJSONValueFree(reply);
+    return ret;
+}
diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h
index df59ef7..98c23b0 100644
--- a/src/qemu/qemu_agent.h
+++ b/src/qemu/qemu_agent.h
@@ -69,4 +69,6 @@ int qemuAgentShutdown(qemuAgentPtr mon,
 int qemuAgentFSFreeze(qemuAgentPtr mon);
 int qemuAgentFSThaw(qemuAgentPtr mon);
 
+int qemuAgentSuspend(qemuAgentPtr mon,
+                     unsigned int target);
 #endif /* __QEMU_AGENT_H__ */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 52350f2..f91b885 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -12042,6 +12042,83 @@ cleanup:
     return ret;
 }
 
+static int
+qemuDomainPMSuspendForDuration(virDomainPtr dom,
+                               unsigned int target,
+                               unsigned long long duration,
+                               unsigned int flags)
+{
+    struct qemud_driver *driver = dom->conn->privateData;
+    qemuDomainObjPrivatePtr priv;
+    virDomainObjPtr vm;
+    int ret = -1;
+
+    virCheckFlags(0, -1);
+
+    if (duration) {
+        qemuReportError(VIR_ERR_INVALID_ARG, "%s",
+                        _("Duration not supported. Use 0 for now"));
+        return -1;
+    }
+
+    if (!(target == VIR_NODE_SUSPEND_TARGET_MEM ||
+          target == VIR_NODE_SUSPEND_TARGET_DISK ||
+          target == VIR_NODE_SUSPEND_TARGET_HYBRID)) {
+        qemuReportError(VIR_ERR_INVALID_ARG,
+                        _("Unknown suspend target: %u"),
+                        target);
+        return -1;
+    }
+
+    qemuDriverLock(driver);
+    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+    qemuDriverUnlock(driver);
+
+    if (!vm) {
+        char uuidstr[VIR_UUID_STRING_BUFLEN];
+        virUUIDFormat(dom->uuid, uuidstr);
+        qemuReportError(VIR_ERR_NO_DOMAIN,
+                        _("no domain with matching uuid '%s'"), uuidstr);
+        goto cleanup;
+    }
+
+    priv = vm->privateData;
+
+    if (priv->agentError) {
+        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                        _("QEMU guest agent is not available due to an error"));
+        goto cleanup;
+    }
+
+    if (!priv->agent) {
+        qemuReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                        _("QEMU guest agent is not configured"));
+        goto cleanup;
+    }
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        qemuReportError(VIR_ERR_OPERATION_INVALID,
+                        "%s", _("domain is not running"));
+        goto endjob;
+    }
+
+    qemuDomainObjEnterAgent(driver, vm);
+    ret = qemuAgentSuspend(priv->agent, target);
+    qemuDomainObjExitAgent(driver, vm);
+
+endjob:
+    if (qemuDomainObjEndJob(driver, vm) == 0)
+        vm = NULL;
+
+cleanup:
+    if (vm)
+        virDomainObjUnlock(vm);
+    return ret;
+}
+
 static virDriver qemuDriver = {
     .no = VIR_DRV_QEMU,
     .name = "QEMU",
@@ -12199,6 +12276,7 @@ static virDriver qemuDriver = {
     .domainGetDiskErrors = qemuDomainGetDiskErrors, /* 0.9.10 */
     .domainSetMetadata = qemuDomainSetMetadata, /* 0.9.10 */
     .domainGetMetadata = qemuDomainGetMetadata, /* 0.9.10 */
+    .domainPMSuspendForDuration = qemuDomainPMSuspendForDuration, /* 0.9.10 */
 };
-- 
1.7.3.4
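For reference, a minimal caller-side sketch (not part of the patch) of how a management application would exercise this code path through the public virDomainPMSuspendForDuration API added in 0.9.10; the connection URI and domain name below are placeholders. With this implementation the duration must be 0, and the target is translated into one of the guest-suspend-* agent commands.

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system"); /* placeholder URI */
    virDomainPtr dom = NULL;
    int ret = 1;

    if (!conn)
        return 1;

    if (!(dom = virDomainLookupByName(conn, "demo"))) /* placeholder name */
        goto cleanup;

    /* Ask the guest agent to suspend the guest to RAM; duration must be 0
     * with this patch, and no flags are defined yet. */
    if (virDomainPMSuspendForDuration(dom, VIR_NODE_SUSPEND_TARGET_MEM,
                                      0, 0) < 0) {
        fprintf(stderr, "suspend request failed\n");
        goto cleanup;
    }
    ret = 0;

cleanup:
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}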

On 02/08/2012 07:27 AM, Michal Privoznik wrote:
> via user agent.
> ---
>  src/qemu/qemu_agent.c  |   31 +++++++++++++++++++
>  src/qemu/qemu_agent.h  |    2 +
>  src/qemu/qemu_driver.c |   78 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 111 insertions(+), 0 deletions(-)
> +
> +int
> +qemuAgentSuspend(qemuAgentPtr mon,
> +                 unsigned int target)
> +{
> +    int ret = -1;
> +    virJSONValuePtr cmd;
> +    virJSONValuePtr reply = NULL;
> +
> +    cmd = qemuAgentMakeCommand(qemuAgentSuspendModeTypeToString(target),
> +                               NULL);
> +    if (!cmd)
> +        return -1;
> +
> +    ret = qemuAgentCommand(mon, cmd, &reply);
If we are running qemu 1.0 and guest agent 1.0, a request to suspend will be rejected (the guest agent doesn't know the command). If we are running qemu 1.1 and guest agent 1.1, a request to suspend should just work. But if we are running qemu 1.0 and guest agent 1.1, the command will suspend the guest, but we then have no way to wake it up. I don't think qemu_agent is the right place to check this, rather...
> +static int
> +qemuDomainPMSuspendForDuration(virDomainPtr dom,
> +                               unsigned int target,
> +                               unsigned long long duration,
> +                               unsigned int flags)
> +{
> +    struct qemud_driver *driver = dom->conn->privateData;
> +    qemuDomainObjPrivatePtr priv;
> +    virDomainObjPtr vm;
> +    int ret = -1;
> +
> +    virCheckFlags(0, -1);
> +
> +    if (duration) {
> +        qemuReportError(VIR_ERR_INVALID_ARG, "%s",
> +                        _("Duration not supported. Use 0 for now"));
> +        return -1;
> +    }
> +
> +    if (!(target == VIR_NODE_SUSPEND_TARGET_MEM ||
> +          target == VIR_NODE_SUSPEND_TARGET_DISK ||
> +          target == VIR_NODE_SUSPEND_TARGET_HYBRID)) {
> +        qemuReportError(VIR_ERR_INVALID_ARG,
> +                        _("Unknown suspend target: %u"),
> +                        target);
> +        return -1;
> +    }
> +
> +    qemuDriverLock(driver);
> +    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
> +    qemuDriverUnlock(driver);
> +
> +    if (!vm) {
> +        char uuidstr[VIR_UUID_STRING_BUFLEN];
> +        virUUIDFormat(dom->uuid, uuidstr);
> +        qemuReportError(VIR_ERR_NO_DOMAIN,
> +                        _("no domain with matching uuid '%s'"), uuidstr);
> +        goto cleanup;
> +    }
...Here, after we've determined the vm, and thus the capabilities of the qemu running the vm, we should be checking the QEMU_CAPS bits to see whether that qemu is known to support wakeup. If wakeup is not supported (qemu 1.0), then we must reject TARGET_MEM and TARGET_HYBRID at this point, rather than calling an agent command that might succeed but leave the guest wedged in a suspended state with no recovery.

I'm also wondering if we need a new command to wake a guest that is in S3 mode (whether the guest itself requested that, or whether we requested it via the guest agent); that is, the new system_wakeup monitor command needs to be something (in addition to mouse clicks, serial console bytes, and timer expirations) that can cause a guest to resume from S3. Alas, virDomainResume has no flags argument, and I don't know if we have any other API that would be good for the purpose.

-- 
Eric Blake   eblake@redhat.com   +1-919-301-3266
Libvirt virtualization library http://libvirt.org
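A rough sketch of the gating Eric suggests, sitting right after the virDomainFindByUUID block in qemuDomainPMSuspendForDuration; QEMU_CAPS_WAKEUP is an assumed name here, standing in for whatever capability bit ends up tracking qemu's system_wakeup monitor command:

    /* Sketch only: refuse targets that need a later wakeup when the
     * emulator has no way to wake the guest again.  QEMU_CAPS_WAKEUP is an
     * assumed capability bit for qemu's system_wakeup monitor command. */
    priv = vm->privateData;

    if ((target == VIR_NODE_SUSPEND_TARGET_MEM ||
         target == VIR_NODE_SUSPEND_TARGET_HYBRID) &&
        !qemuCapsGet(priv->qemuCaps, QEMU_CAPS_WAKEUP)) {
        qemuReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                        _("this qemu binary lacks a way to wake up "
                          "a guest suspended to RAM"));
        goto cleanup;
    }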