Statistics are provided through the PCS SDK. Since the SDK offers only an
asynchronous interface, we have to be subscribed to statistics in order to
receive them. The trivial solution of subscribing on every stats request,
waiting for the event and then unsubscribing would introduce significant
delays for a series of successive requests, because each event is only
delivered on the next PCS server notification cycle. On the other hand we
don't want to keep an unnecessary subscription around. So we take a hybrid
approach: subscribe on the first request and keep the subscription alive
while requests keep coming. We populate a statistics cache from the
subscription events and use this cache to serve libvirt's requests.
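In rough outline the request path looks as follows. This is a simplified
sketch of prlsdkGetStatsParam() from the diff below, not the literal code;
extract_param() and wait_on_condition_with_timeout() are illustrative
placeholders rather than real helpers:

    if (cache->stats != PRL_INVALID_HANDLE) {
        cache->count = 0;      /* a request arrived, keep the subscription */
        return extract_param(cache->stats, name, val);
    }
    if (cache->count == -1) {  /* currently unsubscribed: subscribe first */
        if (PRL_FAILED(waitJob(PrlVm_SubscribeToPerfStats(sdkdom, NULL))))
            return -1;
    }
    cache->count = 0;          /* subscribed, cancel any pending unsubscribe */
    while (cache->stats == PRL_INVALID_HANDLE)
        wait_on_condition_with_timeout();  /* filled by the perf event handler */
    return extract_param(cache->stats, name, val);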
* Cache details.
The cache is simply a handle to the most recently delivered event; we call it
a cache because, as long as this handle is valid, it is used to serve
synchronous statistics requests. A counter of successive events is used to
detect that the user has lost interest in statistics: the counter is reset
to 0 on every request, and if more than PARALLELS_STATISTICS_DROP_COUNT
successive events arrive without a request in between, we unsubscribe. The
special counter value -1 distinguishes the subscribed and unsubscribed
states and protects against delayed events.
The values of PARALLELS_STATISTICS_DROP_COUNT and PARALLELS_STATISTICS_TIMEOUT
are just placeholders, chosen without special consideration.
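The event handler implements the other half of this state machine. This is a
simplified sketch of prlsdkHandlePerfEvent() from the diff below, with domain
lookup, locking and error handling omitted:

    if (cache->count == -1) {  /* delayed event after unsubscribing */
        PrlHandle_Free(event);
        return;
    }
    PrlHandle_Free(cache->stats);
    cache->stats = PRL_INVALID_HANDLE;
    if (cache->count > PARALLELS_STATISTICS_DROP_COUNT) {
        /* too many events without a request: unsubscribe */
        waitJob(PrlVm_UnsubscribeFromPerfStats(sdkdom));
        cache->count = -1;     /* mark as unsubscribed */
    } else {
        cache->count++;        /* one more event without a request */
        cache->stats = event;  /* the cache takes ownership of the handle */
        virCondSignal(&cache->cond);  /* wake up any waiting request */
    }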
* Thread safety issues
parallelsDomainBlockStats() uses parallelsDomObjFromDomainRef() because
prlsdkGetStatsParam() may wait on the statistics condition further down the
stack, dropping the domain lock while it waits; without holding an extra
reference we could end up with a dangling pointer when the wait returns.
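The resulting call pattern in parallelsDomainBlockStats() is roughly:

    dom = parallelsDomObjFromDomainRef(domain);  /* lock + extra reference */
    ...
    prlsdkGetBlockStats(dom, disk, stats);  /* may drop the lock while waiting
                                               on the statistics condition */
    ...
    virDomainObjEndAPI(&dom);               /* unlock and drop the reference */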
Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy@parallels.com>
---
src/parallels/parallels_driver.c | 106 +++++++++++++++++++++
src/parallels/parallels_sdk.c | 189 ++++++++++++++++++++++++++++++++++++++
src/parallels/parallels_sdk.h | 2 +
src/parallels/parallels_utils.c | 28 ++++++
src/parallels/parallels_utils.h | 18 ++++
5 files changed, 343 insertions(+), 0 deletions(-)
diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c
index 4b87213..33c112e 100644
--- a/src/parallels/parallels_driver.c
+++ b/src/parallels/parallels_driver.c
@@ -51,6 +51,7 @@
#include "nodeinfo.h"
#include "virstring.h"
#include "cpu/cpu.h"
+#include "virtypedparam.h"
#include "parallels_driver.h"
#include "parallels_utils.h"
@@ -1179,6 +1180,109 @@ parallelsDomainGetMaxMemory(virDomainPtr domain)
return ret;
}
+static int
+parallelsDomainBlockStats(virDomainPtr domain, const char *path,
+ virDomainBlockStatsPtr stats)
+{
+ virDomainObjPtr dom = NULL;
+ int ret = -1;
+ size_t i;
+ int idx;
+
+ if (!(dom = parallelsDomObjFromDomainRef(domain)))
+ return -1;
+
+ if (*path) {
+ if ((idx = virDomainDiskIndexByName(dom->def, path, false)) < 0) {
+ virReportError(VIR_ERR_INVALID_ARG, _("invalid path: %s"), path);
+ goto cleanup;
+ }
+ if (prlsdkGetBlockStats(dom, dom->def->disks[idx], stats) < 0)
+ goto cleanup;
+ } else {
+ virDomainBlockStatsStruct s;
+
+#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME) \
+ stats->VAR = 0;
+
+ PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS)
+
+#undef PARALLELS_ZERO_STATS
+
+ for (i = 0; i < dom->def->ndisks; i++) {
+ if (prlsdkGetBlockStats(dom, dom->def->disks[i], &s) < 0)
+ goto cleanup;
+
+#define PARALLELS_SUM_STATS(VAR, TYPE, NAME) \
+ if (s.VAR != -1) \
+ stats->VAR += s.VAR;
+
+ PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS)
+
+#undef PARALLELS_SUM_STATS
+ }
+ }
+ stats->errs = -1;
+ ret = 0;
+
+ cleanup:
+ if (dom)
+ virDomainObjEndAPI(&dom);
+
+ return ret;
+}
+
+static int
+parallelsDomainBlockStatsFlags(virDomainPtr domain,
+ const char *path,
+ virTypedParameterPtr params,
+ int *nparams,
+ unsigned int flags)
+{
+ virDomainBlockStatsStruct stats;
+ int ret = -1;
+ size_t i;
+
+ virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
+ /* We don't return strings, and thus trivially support this flag. */
+ flags &= ~VIR_TYPED_PARAM_STRING_OKAY;
+
+ if (parallelsDomainBlockStats(domain, path, &stats) < 0)
+ goto cleanup;
+
+ if (*nparams == 0) {
+#define PARALLELS_COUNT_STATS(VAR, TYPE, NAME) \
+ if ((stats.VAR) != -1) \
+ ++*nparams;
+
+ PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_COUNT_STATS)
+
+#undef PARALLELS_COUNT_STATS
+ ret = 0;
+ goto cleanup;
+ }
+
+ i = 0;
+#define PARALLELS_BLOCK_STATS_ASSIGN_PARAM(VAR, TYPE, NAME) \
+ if (i < *nparams && (stats.VAR) != -1) { \
+ if (virTypedParameterAssign(params + i, TYPE, \
+ VIR_TYPED_PARAM_LLONG, (stats.VAR)) < 0) \
+ goto cleanup; \
+ i++; \
+ }
+
+ PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_BLOCK_STATS_ASSIGN_PARAM)
+
+#undef PARALLELS_BLOCK_STATS_ASSIGN_PARAM
+
+ *nparams = i;
+ ret = 0;
+
+ cleanup:
+ return ret;
+}
+
+
static virHypervisorDriver parallelsDriver = {
.name = "Parallels",
.connectOpen = parallelsConnectOpen, /* 0.10.0 */
@@ -1228,6 +1332,8 @@ static virHypervisorDriver parallelsDriver = {
.domainManagedSave = parallelsDomainManagedSave, /* 1.2.14 */
.domainManagedSaveRemove = parallelsDomainManagedSaveRemove, /* 1.2.14 */
.domainGetMaxMemory = parallelsDomainGetMaxMemory, /* 1.2.15 */
+ .domainBlockStats = parallelsDomainBlockStats, /* 1.2.17 */
+ .domainBlockStatsFlags = parallelsDomainBlockStatsFlags, /* 1.2.17 */
};
static virConnectDriver parallelsConnectDriver = {
diff --git a/src/parallels/parallels_sdk.c b/src/parallels/parallels_sdk.c
index d5a9790..8c52fce 100644
--- a/src/parallels/parallels_sdk.c
+++ b/src/parallels/parallels_sdk.c
@@ -21,6 +21,7 @@
*/
#include <config.h>
+#include <stdarg.h>
#include "virerror.h"
#include "viralloc.h"
@@ -29,6 +30,7 @@
#include "virlog.h"
#include "datatypes.h"
#include "domain_conf.h"
+#include "virtime.h"
#include "parallels_sdk.h"
@@ -407,6 +409,8 @@ prlsdkDomObjFreePrivate(void *p)
return;
PrlHandle_Free(pdom->sdkdom);
+ PrlHandle_Free(pdom->cache.stats);
+ virCondDestroy(&pdom->cache.cond);
VIR_FREE(pdom->uuid);
VIR_FREE(pdom->home);
VIR_FREE(p);
@@ -1260,6 +1264,13 @@ prlsdkLoadDomain(parallelsConnPtr privconn,
* to NULL temporarily */
pdom->uuid = NULL;
+ pdom->cache.stats = PRL_INVALID_HANDLE;
+ pdom->cache.count = -1;
+ if (virCondInit(&pdom->cache.cond) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("cannot initialize condition"));
+ goto error;
+ }
+
if (prlsdkGetDomainIds(sdkdom, &def->name, def->uuid) < 0)
goto error;
@@ -1635,6 +1646,51 @@ prlsdkHandleVmRemovedEvent(parallelsConnPtr privconn,
return pret;
}
+#define PARALLELS_STATISTICS_DROP_COUNT 3
+
+static PRL_RESULT
+prlsdkHandlePerfEvent(parallelsConnPtr privconn,
+ PRL_HANDLE event,
+ unsigned char *uuid)
+{
+ virDomainObjPtr dom = NULL;
+ parallelsDomObjPtr privdom = NULL;
+ PRL_HANDLE job = PRL_INVALID_HANDLE;
+
+ dom = virDomainObjListFindByUUID(privconn->domains, uuid);
+ if (dom == NULL)
+ goto cleanup;
+ privdom = dom->privateData;
+
+ /* delayed event arrived after we unsubscribed */
+ if (privdom->cache.count == -1)
+ goto cleanup;
+
+ PrlHandle_Free(privdom->cache.stats);
+ privdom->cache.stats = PRL_INVALID_HANDLE;
+
+ if (privdom->cache.count > PARALLELS_STATISTICS_DROP_COUNT) {
+ job = PrlVm_UnsubscribeFromPerfStats(privdom->sdkdom);
+ if (PRL_FAILED(waitJob(job)))
+ goto cleanup;
+ /* change state to unsubscribed */
+ privdom->cache.count = -1;
+ } else {
+ ++privdom->cache.count;
+ privdom->cache.stats = event;
+ /* the cache now owns the event handle; don't free it in cleanup */
+ event = PRL_INVALID_HANDLE;
+ virCondSignal(&privdom->cache.cond);
+ }
+
+ cleanup:
+ PrlHandle_Free(event);
+ if (dom)
+ virObjectUnlock(dom);
+
+ return PRL_ERR_SUCCESS;
+}
+
static PRL_RESULT
prlsdkHandleVmEvent(parallelsConnPtr privconn, PRL_HANDLE prlEvent)
{
@@ -1665,6 +1721,11 @@ prlsdkHandleVmEvent(parallelsConnPtr privconn, PRL_HANDLE prlEvent)
case PET_DSP_EVT_VM_UNREGISTERED:
return prlsdkHandleVmRemovedEvent(privconn, uuid);
break;
+ case PET_DSP_EVT_VM_PERFSTATS:
+ prlsdkHandlePerfEvent(privconn, prlEvent, uuid);
+ /* prlsdkHandlePerfEvent() took ownership of the event handle */
+ prlEvent = PRL_INVALID_HANDLE;
+ break;
default:
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Can't handle event of type %d"), prlEventType);
@@ -1714,6 +1775,7 @@ prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque)
return pret;
}
+
int prlsdkSubscribeToPCSEvents(parallelsConnPtr privconn)
{
PRL_RESULT pret = PRL_ERR_UNINITIALIZED;
@@ -3450,3 +3512,130 @@ prlsdkDomainManagedSaveRemove(virDomainObjPtr dom)
return 0;
}
+
+static int
+prlsdkExtractStatsParam(PRL_HANDLE sdkstats, const char *name, long long *val)
+{
+ PRL_HANDLE param = PRL_INVALID_HANDLE;
+ PRL_RESULT pret;
+ PRL_INT64 pval = 0;
+ int ret = -1;
+
+ pret = PrlEvent_GetParamByName(sdkstats, name, &param);
+ if (pret == PRL_ERR_NO_DATA) {
+ *val = -1;
+ ret = 0;
+ goto cleanup;
+ } else if (PRL_FAILED(pret)) {
+ logPrlError(pret);
+ goto cleanup;
+ }
+ pret = PrlEvtPrm_ToInt64(param, &pval);
+ prlsdkCheckRetGoto(pret, cleanup);
+
+ *val = pval;
+ ret = 0;
+
+ cleanup:
+ PrlHandle_Free(param);
+ return ret;
+}
+
+#define PARALLELS_STATISTICS_TIMEOUT (60 * 1000)
+
+static int
+prlsdkGetStatsParam(virDomainObjPtr dom, const char *name, long long *val)
+{
+ parallelsDomObjPtr privdom = dom->privateData;
+ PRL_HANDLE job = PRL_INVALID_HANDLE;
+ unsigned long long now;
+
+ if (privdom->cache.stats != PRL_INVALID_HANDLE) {
+ /* reset the count to keep the subscription */
+ privdom->cache.count = 0;
+ return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+ }
+
+ if (privdom->cache.count == -1) {
+ job = PrlVm_SubscribeToPerfStats(privdom->sdkdom, NULL);
+ if (PRL_FAILED(waitJob(job)))
+ goto error;
+ }
+
+ /* change state to subscribed if we were unsubscribed, or just reset
+ the count to cancel any pending unsubscribe attempt */
+ privdom->cache.count = 0;
+
+ if (virTimeMillisNow(&now) < 0) {
+ virReportSystemError(errno, "%s", _("Unable to get current time"));
+ goto error;
+ }
+
+ while (privdom->cache.stats == PRL_INVALID_HANDLE) {
+ if (virCondWaitUntil(&privdom->cache.cond, &dom->parent.lock,
+ now + PARALLELS_STATISTICS_TIMEOUT) < 0) {
+ if (errno == ETIMEDOUT) {
+ virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
+ _("Timeout on waiting statistics event."));
+ goto error;
+ } else {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Unable to wait on monitor condition"));
+ goto error;
+ }
+ }
+ }
+
+ return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+ error:
+ return -1;
+}
+
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats)
+{
+ virDomainDeviceDriveAddressPtr address;
+ int idx;
+ const char *prefix;
+ int ret = -1;
+ char *name = NULL;
+
+ address = &disk->info.addr.drive;
+ switch (disk->bus) {
+ case VIR_DOMAIN_DISK_BUS_IDE:
+ prefix = "ide";
+ idx = address->bus * 2 + address->unit;
+ break;
+ case VIR_DOMAIN_DISK_BUS_SATA:
+ prefix = "sata";
+ idx = address->unit;
+ break;
+ case VIR_DOMAIN_DISK_BUS_SCSI:
+ prefix = "scsi";
+ idx = address->unit;
+ break;
+ default:
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Unknown disk bus: %X"), disk->bus);
+ goto cleanup;
+ }
+
+
+#define PRLSDK_GET_STAT_PARAM(VAL, TYPE, NAME) \
+ if (virAsprintf(&name, "devices.%s%d.%s", prefix, idx, NAME) < 0) \
+ goto cleanup; \
+ if (prlsdkGetStatsParam(dom, name, &stats->VAL) < 0) \
+ goto cleanup; \
+ VIR_FREE(name);
+
+ PARALLELS_BLOCK_STATS_FOREACH(PRLSDK_GET_STAT_PARAM)
+
+#undef PRLSDK_GET_STAT_PARAM
+
+ ret = 0;
+
+ cleanup:
+
+ VIR_FREE(name);
+ return ret;
+}
diff --git a/src/parallels/parallels_sdk.h b/src/parallels/parallels_sdk.h
index 3f17fc8..afa6745 100644
--- a/src/parallels/parallels_sdk.h
+++ b/src/parallels/parallels_sdk.h
@@ -64,3 +64,5 @@ int
prlsdkAttachVolume(virDomainObjPtr dom, virDomainDiskDefPtr disk);
int
prlsdkDetachVolume(virDomainObjPtr dom, virDomainDiskDefPtr disk);
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats);
diff --git a/src/parallels/parallels_utils.c b/src/parallels/parallels_utils.c
index ff9d47d..540986b 100644
--- a/src/parallels/parallels_utils.c
+++ b/src/parallels/parallels_utils.c
@@ -64,6 +64,34 @@ parallelsDomObjFromDomain(virDomainPtr domain)
}
+/**
+ * parallelsDomObjFromDomainRef:
+ * @domain: Domain pointer that has to be looked up
+ *
+ * This function looks up @domain and returns the appropriate virDomainObjPtr
+ * that has to be released by calling virDomainObjEndAPI().
+ *
+ * Returns the domain object with incremented reference counter which is locked
+ * on success, NULL otherwise.
+ */
+virDomainObjPtr
+parallelsDomObjFromDomainRef(virDomainPtr domain)
+{
+ virDomainObjPtr vm;
+ parallelsConnPtr privconn = domain->conn->privateData;
+ char uuidstr[VIR_UUID_STRING_BUFLEN];
+
+ vm = virDomainObjListFindByUUIDRef(privconn->domains, domain->uuid);
+ if (!vm) {
+ virUUIDFormat(domain->uuid, uuidstr);
+ virReportError(VIR_ERR_NO_DOMAIN,
+ _("no domain with matching uuid '%s' (%s)"),
+ uuidstr, domain->name);
+ return NULL;
+ }
+
+ return vm;
+}
static int
parallelsDoCmdRun(char **outbuf, const char *binary, va_list list)
diff --git a/src/parallels/parallels_utils.h b/src/parallels/parallels_utils.h
index 2d1d405..cdf6082 100644
--- a/src/parallels/parallels_utils.h
+++ b/src/parallels/parallels_utils.h
@@ -73,11 +73,22 @@ struct _parallelsConn {
typedef struct _parallelsConn parallelsConn;
typedef struct _parallelsConn *parallelsConnPtr;
+struct _parallelsCountersCache {
+ PRL_HANDLE stats;
+ virCond cond;
+ /* -1 - unsubscribed, > -1 - subscribed */
+ int count;
+};
+
+typedef struct _parallelsCountersCache parallelsCountersCache;
+
struct parallelsDomObj {
int id;
char *uuid;
char *home;
PRL_HANDLE sdkdom;
+ parallelsCountersCache cache;
};
typedef struct parallelsDomObj *parallelsDomObjPtr;
@@ -91,6 +102,7 @@ int parallelsNetworkClose(virConnectPtr conn);
extern virNetworkDriver parallelsNetworkDriver;
virDomainObjPtr parallelsDomObjFromDomain(virDomainPtr domain);
+virDomainObjPtr parallelsDomObjFromDomainRef(virDomainPtr domain);
virJSONValuePtr parallelsParseOutput(const char *binary, ...)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_SENTINEL;
@@ -106,4 +118,10 @@ virStorageVolPtr parallelsStorageVolLookupByPathLocked(virConnectPtr conn,
int parallelsStorageVolDefRemove(virStoragePoolObjPtr privpool,
virStorageVolDefPtr privvol);
+#define PARALLELS_BLOCK_STATS_FOREACH(OP) \
+ OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, "read_requests") \
+ OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, "read_total") \
+ OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, "write_requests") \
+ OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, "write_total")
+
#endif
--
1.7.1