[libvirt] [PATCH v2] qemu: Implement DomainPMSuspendForDuration

via user agent. --- diff to v1: -allow mem and hybrid targets iff system_wakeup monitor command is presented src/qemu/qemu_agent.c | 31 +++++++++++++++ src/qemu/qemu_agent.h | 2 + src/qemu/qemu_capabilities.c | 1 + src/qemu/qemu_capabilities.h | 1 + src/qemu/qemu_driver.c | 87 ++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_monitor.c | 21 +++++----- src/qemu/qemu_monitor.h | 4 +- src/qemu/qemu_monitor_json.c | 15 ++++--- src/qemu/qemu_monitor_json.h | 5 ++- src/qemu/qemu_process.c | 2 +- 10 files changed, 149 insertions(+), 20 deletions(-) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 9df5546..a17d025 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1184,3 +1184,34 @@ cleanup: virJSONValueFree(reply); return ret; } + +VIR_ENUM_DECL(qemuAgentSuspendMode); + +VIR_ENUM_IMPL(qemuAgentSuspendMode, + VIR_NODE_SUSPEND_TARGET_LAST, + "guest-suspend-ram", + "guest-suspend-disk", + "guest-suspend-hybrid"); + +int +qemuAgentSuspend(qemuAgentPtr mon, + unsigned int target) +{ + int ret = -1; + virJSONValuePtr cmd; + virJSONValuePtr reply = NULL; + + cmd = qemuAgentMakeCommand(qemuAgentSuspendModeTypeToString(target), + NULL); + if (!cmd) + return -1; + + ret = qemuAgentCommand(mon, cmd, &reply); + + if (ret == 0) + ret = qemuAgentCheckError(cmd, reply); + + virJSONValueFree(cmd); + virJSONValueFree(reply); + return ret; +} diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index df59ef7..98c23b0 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -69,4 +69,6 @@ int qemuAgentShutdown(qemuAgentPtr mon, int qemuAgentFSFreeze(qemuAgentPtr mon); int qemuAgentFSThaw(qemuAgentPtr mon); +int qemuAgentSuspend(qemuAgentPtr mon, + unsigned int target); #endif /* __QEMU_AGENT_H__ */ diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c index 426637c..6d35676 100644 --- a/src/qemu/qemu_capabilities.c +++ b/src/qemu/qemu_capabilities.c @@ -152,6 +152,7 @@ VIR_ENUM_IMPL(qemuCaps, QEMU_CAPS_LAST, 
"fsdev-writeout", "drive-iotune", /* 85 */ + "system_wakeup", ); struct qemu_feature_flags { diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h index 18d6bc8..b9666e1 100644 --- a/src/qemu/qemu_capabilities.h +++ b/src/qemu/qemu_capabilities.h @@ -120,6 +120,7 @@ enum qemuCapsFlags { QEMU_CAPS_CPU_HOST = 83, /* support for -cpu host */ QEMU_CAPS_FSDEV_WRITEOUT = 84, /* -fsdev writeout supported */ QEMU_CAPS_DRIVE_IOTUNE = 85, /* -drive bps= and friends */ + QEMU_CAPS_WAKEUP = 86, /* system_wakeup monitor command */ QEMU_CAPS_LAST, /* this must always be the last item */ }; diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 160cb37..d342fae 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -12041,6 +12041,92 @@ cleanup: return ret; } +static int +qemuDomainPMSuspendForDuration(virDomainPtr dom, + unsigned int target, + unsigned long long duration, + unsigned int flags) +{ + struct qemud_driver *driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; + virDomainObjPtr vm; + int ret = -1; + + virCheckFlags(0, -1); + + if (duration) { + qemuReportError(VIR_ERR_INVALID_ARG, "%s", + _("Duration not supported. 
Use 0 for now")); + return -1; + } + + if (!(target == VIR_NODE_SUSPEND_TARGET_MEM || + target == VIR_NODE_SUSPEND_TARGET_DISK || + target == VIR_NODE_SUSPEND_TARGET_HYBRID)) { + qemuReportError(VIR_ERR_INVALID_ARG, + _("Unknown suspend target: %u"), + target); + return -1; + } + + qemuDriverLock(driver); + vm = virDomainFindByUUID(&driver->domains, dom->uuid); + qemuDriverUnlock(driver); + + if (!vm) { + char uuidstr[VIR_UUID_STRING_BUFLEN]; + virUUIDFormat(dom->uuid, uuidstr); + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching uuid '%s'"), uuidstr); + goto cleanup; + } + + priv = vm->privateData; + + if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_WAKEUP) && + (target == VIR_NODE_SUSPEND_TARGET_MEM || + target == VIR_NODE_SUSPEND_TARGET_HYBRID)) { + qemuReportError(VIR_ERR_INVALID_ARG, "%s", + _("Unable to suspend domain due to " + "missing system_wakeup monitor command")); + goto cleanup; + } + + if (priv->agentError) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("QEMU guest agent is not available due to an error")); + goto cleanup; + } + + if (!priv->agent) { + qemuReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s", + _("QEMU guest agent is not configured")); + goto cleanup; + } + + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + + qemuDomainObjEnterAgent(driver, vm); + ret = qemuAgentSuspend(priv->agent, target); + qemuDomainObjExitAgent(driver, vm); + +endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + +cleanup: + if (vm) + virDomainObjUnlock(vm); + return ret; +} + static virDriver qemuDriver = { .no = VIR_DRV_QEMU, .name = "QEMU", @@ -12198,6 +12284,7 @@ static virDriver qemuDriver = { .domainGetDiskErrors = qemuDomainGetDiskErrors, /* 0.9.10 */ .domainSetMetadata = qemuDomainSetMetadata, /* 0.9.10 */ .domainGetMetadata = qemuDomainGetMetadata, /* 0.9.10 */ + 
.domainPMSuspendForDuration = qemuDomainPMSuspendForDuration, /* 0.9.10 */ }; diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index 7872e3d..93f3505 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -996,9 +996,11 @@ int qemuMonitorEmitBlockJob(qemuMonitorPtr mon, -int qemuMonitorSetCapabilities(qemuMonitorPtr mon) +int qemuMonitorSetCapabilities(qemuMonitorPtr mon, + virBitmapPtr qemuCaps) { int ret; + int json_hmp; VIR_DEBUG("mon=%p", mon); if (!mon) { @@ -1009,19 +1011,16 @@ int qemuMonitorSetCapabilities(qemuMonitorPtr mon) if (mon->json) { ret = qemuMonitorJSONSetCapabilities(mon); - if (ret == 0) { - int hmp = qemuMonitorJSONCheckHMP(mon); - if (hmp < 0) { - /* qemu may quited unexpectedly when we call - * qemuMonitorJSONCheckHMP() */ - ret = -1; - } else { - mon->json_hmp = hmp > 0; - } - } + if (ret) + goto cleanup; + + ret = qemuMonitorJSONCheckCommands(mon, qemuCaps, &json_hmp); + mon->json_hmp = json_hmp > 0; } else { ret = 0; } + +cleanup: return ret; } diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 5945d3e..7c6c52b 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -29,6 +29,7 @@ # include "domain_conf.h" # include "qemu_conf.h" +# include "bitmap.h" # include "virhash.h" typedef struct _qemuMonitor qemuMonitor; @@ -135,7 +136,8 @@ qemuMonitorPtr qemuMonitorOpen(virDomainObjPtr vm, void qemuMonitorClose(qemuMonitorPtr mon); -int qemuMonitorSetCapabilities(qemuMonitorPtr mon); +int qemuMonitorSetCapabilities(qemuMonitorPtr mon, + virBitmapPtr qemuCaps); int qemuMonitorCheckHMP(qemuMonitorPtr mon, const char *cmd); diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index b0ae570..513788a 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -34,6 +34,7 @@ #include "qemu_monitor_text.h" #include "qemu_monitor_json.h" #include "qemu_command.h" +#include "qemu_capabilities.h" #include "memory.h" #include "logging.h" #include "driver.h" 
@@ -800,7 +801,9 @@ qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon) * human-monitor-command worked or -1 on failure */ int -qemuMonitorJSONCheckHMP(qemuMonitorPtr mon) +qemuMonitorJSONCheckCommands(qemuMonitorPtr mon, + virBitmapPtr qemuCaps, + int *json_hmp) { int ret = -1; virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("query-commands", NULL); @@ -828,13 +831,13 @@ qemuMonitorJSONCheckHMP(qemuMonitorPtr mon) !(name = virJSONValueObjectGetString(entry, "name"))) goto cleanup; - if (STREQ(name, "human-monitor-command")) { - ret = 1; - goto cleanup; - } + if (STREQ(name, "human-monitor-command")) + *json_hmp = 1; + + if (STREQ(name, "system_wakeup")) + qemuCapsSet(qemuCaps, QEMU_CAPS_WAKEUP); } - /* human-monitor-command is not supported */ ret = 0; cleanup: diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h index d221e59..d0b3000 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -28,6 +28,7 @@ # include "internal.h" # include "qemu_monitor.h" +# include "bitmap.h" int qemuMonitorJSONIOProcess(qemuMonitorPtr mon, const char *data, @@ -41,7 +42,9 @@ int qemuMonitorJSONHumanCommandWithFd(qemuMonitorPtr mon, int qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon); -int qemuMonitorJSONCheckHMP(qemuMonitorPtr mon); +int qemuMonitorJSONCheckCommands(qemuMonitorPtr mon, + virBitmapPtr qemuCaps, + int *json_hmp); int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon, virConnectPtr conn); diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 2d92d66..7afb585 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1086,7 +1086,7 @@ qemuConnectMonitor(struct qemud_driver *driver, virDomainObjPtr vm) qemuDomainObjEnterMonitorWithDriver(driver, vm); - ret = qemuMonitorSetCapabilities(priv->mon); + ret = qemuMonitorSetCapabilities(priv->mon, priv->qemuCaps); qemuDomainObjExitMonitorWithDriver(driver, vm); error: -- 1.7.3.4

On 02/10/2012 04:25 AM, Michal Privoznik wrote:
via user agent. --- diff to v1: -allow mem and hybrid targets iff system_wakeup monitor command is presented
Before I review too much of this, remember that there is still a pending discussion that might rename the API slightly to add in the wakeup.
src/qemu/qemu_agent.c | 31 +++++++++++++++ src/qemu/qemu_agent.h | 2 + src/qemu/qemu_capabilities.c | 1 + src/qemu/qemu_capabilities.h | 1 + src/qemu/qemu_driver.c | 87 ++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_monitor.c | 21 +++++----- src/qemu/qemu_monitor.h | 4 +- src/qemu/qemu_monitor_json.c | 15 ++++--- src/qemu/qemu_monitor_json.h | 5 ++- src/qemu/qemu_process.c | 2 +- 10 files changed, 149 insertions(+), 20 deletions(-)
I'm wondering if it might be worth dividing this into two patches - the first that refactors qemu_capabilities and qemu_monitor to set QEMU_CAPS_WAKEUP, and the second that touches qemu_agent, qemu_driver, and qemu_monitor to wire up the suspend command. Another thing that would be nice to do would be improving the qemuhelptest.c to allow the testsuite to start probing for JSON responses; I would add some new files qemuhelpdata/qemu-*-monitor, which contain the JSON output corresponding to the query-commands input for the various qemu versions that support JSON, so we make sure we can enable caps according to what json strings we parse.
+static int +qemuDomainPMSuspendForDuration(virDomainPtr dom, + unsigned int target, + unsigned long long duration, + unsigned int flags) +{ + struct qemud_driver *driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; + virDomainObjPtr vm; + int ret = -1; + + virCheckFlags(0, -1); + + if (duration) { + qemuReportError(VIR_ERR_INVALID_ARG, "%s",
VIR_ERR_ARGUMENT_UNSUPPORTED - the call was valid per the documentation, but we don't support it.
+ _("Duration not supported. Use 0 for now")); + return -1; + } + + if (!(target == VIR_NODE_SUSPEND_TARGET_MEM || + target == VIR_NODE_SUSPEND_TARGET_DISK || + target == VIR_NODE_SUSPEND_TARGET_HYBRID)) { + qemuReportError(VIR_ERR_INVALID_ARG,
This one's fine - the call really is invalid per the documentation.
+ + if (priv->agentError) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("QEMU guest agent is not available due to an error")); + goto cleanup; + }
Meta-question - is it right to have a set-once priv->agentError flag? Or, since the agent can come and go at will, an error encountered previously might be overcome if this time around we send a new sync flag. Thus, would it be better to have a dynamic function call, where the task of re-synchronizing to the guest is done at that point, and which either returns 0 if the guest is currently in sync or -1 if the resync failed for any reason? But this would affect all uses of the guest agent, so for now, keeping this call consistent with other callers is okay.
+ + if (!priv->agent) { + qemuReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s", + _("QEMU guest agent is not configured")); + goto cleanup; + }
In fact, if you have a function call, then you could inline the priv->agent check into that function, for less redundancy in all callers that need an active agent.
+ + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup;
Hmm - if we do have a helper function to resync the agent, it would have to be done inside the job condition.
@@ -1009,19 +1011,16 @@ int qemuMonitorSetCapabilities(qemuMonitorPtr mon)
if (mon->json) { ret = qemuMonitorJSONSetCapabilities(mon); - if (ret == 0) { - int hmp = qemuMonitorJSONCheckHMP(mon); - if (hmp < 0) { - /* qemu may quited unexpectedly when we call - * qemuMonitorJSONCheckHMP() */ - ret = -1; - } else { - mon->json_hmp = hmp > 0; - } - } + if (ret) + goto cleanup; + + ret = qemuMonitorJSONCheckCommands(mon, qemuCaps, &json_hmp); + mon->json_hmp = json_hmp > 0;
Looks nice. -- Eric Blake eblake@redhat.com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

On 10.02.2012 22:25, Eric Blake wrote:
On 02/10/2012 04:25 AM, Michal Privoznik wrote:
via user agent. --- diff to v1: -allow mem and hybrid targets iff system_wakeup monitor command is presented
Before I review too much of this, remember that there is still a pending discussion that might rename the API slightly to add in the wakeup.
src/qemu/qemu_agent.c | 31 +++++++++++++++ src/qemu/qemu_agent.h | 2 + src/qemu/qemu_capabilities.c | 1 + src/qemu/qemu_capabilities.h | 1 + src/qemu/qemu_driver.c | 87 ++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_monitor.c | 21 +++++----- src/qemu/qemu_monitor.h | 4 +- src/qemu/qemu_monitor_json.c | 15 ++++--- src/qemu/qemu_monitor_json.h | 5 ++- src/qemu/qemu_process.c | 2 +- 10 files changed, 149 insertions(+), 20 deletions(-)
I'm wondering if it might be worth dividing this into two patches - the first that refactors qemu_capabilities and qemu_monitor to set QEMU_CAPS_WAKEUP, and the second that touches qemu_agent, qemu_driver, and qemu_monitor to wire up the suspend command.
Another thing that would be nice to do would be improving the qemuhelptest.c to allow the testsuite to start probing for JSON responses; I would add some new files qemuhelpdata/qemu-*-monitor, which contain the JSON output corresponding to the query-commands input for the various qemu versions that support JSON, so we make sure we can enable caps according to what json strings we parse.
Yeah, but I'd like to have this in a separate patch, as it is not trivial and a bit overkill for this functionality I am adding.
+static int +qemuDomainPMSuspendForDuration(virDomainPtr dom, + unsigned int target, + unsigned long long duration, + unsigned int flags) +{ + struct qemud_driver *driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; + virDomainObjPtr vm; + int ret = -1; + + virCheckFlags(0, -1); + + if (duration) { + qemuReportError(VIR_ERR_INVALID_ARG, "%s",
VIR_ERR_ARGUMENT_UNSUPPORTED - the call was valid per the documentation, but we don't support it.
+ _("Duration not supported. Use 0 for now")); + return -1; + } + + if (!(target == VIR_NODE_SUSPEND_TARGET_MEM || + target == VIR_NODE_SUSPEND_TARGET_DISK || + target == VIR_NODE_SUSPEND_TARGET_HYBRID)) { + qemuReportError(VIR_ERR_INVALID_ARG,
This one's fine - the call really is invalid per the documentation.
+ + if (priv->agentError) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("QEMU guest agent is not available due to an error")); + goto cleanup; + }
Meta-question - is it right to have a set-once priv->agentError flag? Or, since the agent can come and go at will, an error encountered previously might be overcome if this time around we send a new sync flag. Thus, would it be better to have a dynamic function call, where the task of re-synchronizing to the guest is done at that point, and which either returns 0 if the guest is currently in sync or -1 if the resync failed for any reason? But this would affect all uses of the guest agent, so for now, keeping this call consistent with other callers is okay.
Right. However, there is still a pending patch for issuing the 'guest-sync' command, which can solve this issue for us. I mean, then we can drop the whole priv->agentError flag.
+ + if (!priv->agent) { + qemuReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s", + _("QEMU guest agent is not configured")); + goto cleanup; + }
In fact, if you have a function call, then you could inline the priv->agent check into that function, for less redundancy in all callers that need an active agent.
Agree, but then again - a separate patch.
+ + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup;
Hmm - if we do have a helper function to resync the agent, it would have to be done inside the job condition.
@@ -1009,19 +1011,16 @@ int qemuMonitorSetCapabilities(qemuMonitorPtr mon)
if (mon->json) { ret = qemuMonitorJSONSetCapabilities(mon); - if (ret == 0) { - int hmp = qemuMonitorJSONCheckHMP(mon); - if (hmp < 0) { - /* qemu may quited unexpectedly when we call - * qemuMonitorJSONCheckHMP() */ - ret = -1; - } else { - mon->json_hmp = hmp > 0; - } - } + if (ret) + goto cleanup; + + ret = qemuMonitorJSONCheckCommands(mon, qemuCaps, &json_hmp); + mon->json_hmp = json_hmp > 0;
Looks nice.
Okay, will send another version. Thanks.
participants (2)
-
Eric Blake
-
Michal Privoznik