[libvirt] [PATCH] parallels: add block device statistics to driver

Statistics are provided through the PCS SDK. As the SDK offers only an
asynchronous interface, we need to be subscribed to statistics in order to
get them. The trivial solution of subscribing on every stats request,
waiting for the event and then unsubscribing would lead to significant
delays for a series of successive requests, because the event is only
delivered on the next PCS server notify cycle. On the other hand, we don't
want to keep an unnecessary subscription. So we take a hybrid approach:
subscribe on the first request and then keep the subscription while
requests are active. We populate a cache of statistics from subscription
events and use this cache to serve libvirt's requests.

Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy@parallels.com>
---
 src/parallels/parallels_driver.c |  106 +++++++++++++++++++++
 src/parallels/parallels_sdk.c    |  193 ++++++++++++++++++++++++++++++++------
 src/parallels/parallels_sdk.h    |    2 +
 src/parallels/parallels_utils.h  |   15 +++
 4 files changed, 285 insertions(+), 31 deletions(-)

diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c
index 4b87213..ce59e00 100644
--- a/src/parallels/parallels_driver.c
+++ b/src/parallels/parallels_driver.c
@@ -51,6 +51,7 @@
 #include "nodeinfo.h"
 #include "virstring.h"
 #include "cpu/cpu.h"
+#include "virtypedparam.h"
 
 #include "parallels_driver.h"
 #include "parallels_utils.h"
@@ -1179,6 +1180,109 @@ parallelsDomainGetMaxMemory(virDomainPtr domain)
     return ret;
 }
 
+static int
+parallelsDomainBlockStats(virDomainPtr domain, const char *path,
+                          virDomainBlockStatsPtr stats)
+{
+    virDomainObjPtr dom = NULL;
+    int ret = -1;
+    size_t i;
+    int idx;
+
+    if (!(dom = parallelsDomObjFromDomain(domain)))
+        return -1;
+
+    if (*path) {
+        if ((idx = virDomainDiskIndexByName(dom->def, path, false)) < 0) {
+            virReportError(VIR_ERR_INVALID_ARG, _("invalid path: %s"), path);
+            goto cleanup;
+        }
+        if (prlsdkGetBlockStats(dom, dom->def->disks[idx], stats) < 0)
+            goto cleanup;
+    } else {
+        virDomainBlockStatsStruct s;
+
+#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME)   \
+        stats->VAR = 0;
+
+        PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS)
+
+#undef PARALLELS_ZERO_STATS
+
+        for (i = 0; i < dom->def->ndisks; i++) {
+            if (prlsdkGetBlockStats(dom, dom->def->disks[i], &s) < 0)
+                goto cleanup;
+
+#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)    \
+            if (s.VAR != -1)                    \
+                stats->VAR += s.VAR;
+
+            PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS)
+
+#undef PARALLELS_SUM_STATS
+        }
+    }
+    stats->errs = -1;
+    ret = 0;
+
+ cleanup:
+    if (dom)
+        virObjectUnlock(dom);
+
+    return ret;
+}
+
+static int
+parallelsDomainBlockStatsFlags(virDomainPtr domain,
+                               const char *path,
+                               virTypedParameterPtr params,
+                               int *nparams,
+                               unsigned int flags)
+{
+    virDomainBlockStatsStruct stats;
+    int ret = -1;
+    size_t i;
+
+    virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1);
+    /* We don't return strings, and thus trivially support this flag. */
+    flags &= ~VIR_TYPED_PARAM_STRING_OKAY;
+
+    if (parallelsDomainBlockStats(domain, path, &stats) < 0)
+        goto cleanup;
+
+    if (*nparams == 0) {
+#define PARALLELS_COUNT_STATS(VAR, TYPE, NAME)  \
+        if ((stats.VAR) != -1)                  \
+            ++*nparams;
+
+        PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_COUNT_STATS)
+
+#undef PARALLELS_COUNT_STATS
+        ret = 0;
+        goto cleanup;
+    }
+
+    i = 0;
+#define PARALLELS_BLOCK_STATS_ASSIGN_PARAM(VAR, TYPE, NAME)                  \
+    if (i < *nparams && (stats.VAR) != -1) {                                 \
+        if (virTypedParameterAssign(params + i, TYPE,                        \
+                                    VIR_TYPED_PARAM_LLONG, (stats.VAR)) < 0) \
+            goto cleanup;                                                    \
+        i++;                                                                 \
+    }
+
+    PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_BLOCK_STATS_ASSIGN_PARAM)
+
+#undef PARALLELS_BLOCK_STATS_ASSIGN_PARAM
+
+    *nparams = i;
+    ret = 0;
+
+ cleanup:
+    return ret;
+}
+
+
 static virHypervisorDriver parallelsDriver = {
     .name = "Parallels",
     .connectOpen = parallelsConnectOpen, /* 0.10.0 */
@@ -1228,6 +1332,8 @@ static virHypervisorDriver parallelsDriver = {
     .domainManagedSave = parallelsDomainManagedSave, /* 1.2.14 */
     .domainManagedSaveRemove = parallelsDomainManagedSaveRemove, /* 1.2.14 */
     .domainGetMaxMemory = parallelsDomainGetMaxMemory, /* 1.2.15 */
+    .domainBlockStats = parallelsDomainBlockStats, /* 1.2.16 */
+    .domainBlockStatsFlags = parallelsDomainBlockStatsFlags, /* 1.2.16 */
 };
 
 static virConnectDriver parallelsConnectDriver = {
diff --git a/src/parallels/parallels_sdk.c b/src/parallels/parallels_sdk.c
index 88ad59b..eb8d965 100644
--- a/src/parallels/parallels_sdk.c
+++ b/src/parallels/parallels_sdk.c
@@ -21,6 +21,7 @@
  */
 
 #include <config.h>
+#include <stdarg.h>
 
 #include "virerror.h"
 #include "viralloc.h"
@@ -407,6 +408,8 @@ prlsdkDomObjFreePrivate(void *p)
         return;
 
     PrlHandle_Free(pdom->sdkdom);
+    PrlHandle_Free(pdom->cache.stats);
+    virCondDestroy(&pdom->cache.cond);
     VIR_FREE(pdom->uuid);
     VIR_FREE(pdom->home);
     VIR_FREE(p);
@@ -1260,6 +1263,13 @@ prlsdkLoadDomain(parallelsConnPtr privconn,
      * to NULL temporarily */
     pdom->uuid = NULL;
 
+    pdom->cache.stats = PRL_INVALID_HANDLE;
+    pdom->cache.count = 0;
+    if (virCondInit(&pdom->cache.cond) < 0) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("cannot initialize condition"));
+        goto error;
+    }
+
     if (prlsdkGetDomainIds(sdkdom, &def->name, def->uuid) < 0)
         goto error;
 
@@ -1636,43 +1646,39 @@ prlsdkHandleVmRemovedEvent(parallelsConnPtr privconn,
 }
 
 static PRL_RESULT
-prlsdkHandleVmEvent(parallelsConnPtr privconn, PRL_HANDLE prlEvent)
+prlsdkHandlePerfEvent(parallelsConnPtr privconn,
+                      PRL_HANDLE event,
+                      unsigned char *uuid)
 {
-    PRL_RESULT pret;
-    char uuidstr[VIR_UUID_STRING_BUFLEN + 2];
-    unsigned char uuid[VIR_UUID_BUFLEN];
-    PRL_UINT32 bufsize = ARRAY_CARDINALITY(uuidstr);
-    PRL_EVENT_TYPE prlEventType;
+    virDomainObjPtr dom = NULL;
+    parallelsDomObjPtr privdom = NULL;
+    PRL_HANDLE job = PRL_INVALID_HANDLE;
 
-    pret = PrlEvent_GetType(prlEvent, &prlEventType);
-    prlsdkCheckRetGoto(pret, error);
+    dom = virDomainObjListFindByUUID(privconn->domains, uuid);
+    if (dom == NULL)
+        goto cleanup;
 
-    pret = PrlEvent_GetIssuerId(prlEvent, uuidstr, &bufsize);
-    prlsdkCheckRetGoto(pret, error);
+    privdom = dom->privateData;
 
-    if (prlsdkUUIDParse(uuidstr, uuid) < 0)
-        return PRL_ERR_FAILURE;
+    PrlHandle_Free(privdom->cache.stats);
+    privdom->cache.stats = event;
+    virCondSignal(&privdom->cache.cond);
 
-    switch (prlEventType) {
-    case PET_DSP_EVT_VM_STATE_CHANGED:
-        return prlsdkHandleVmStateEvent(privconn, prlEvent, uuid);
-    case PET_DSP_EVT_VM_CONFIG_CHANGED:
-        return prlsdkHandleVmConfigEvent(privconn, uuid);
-    case PET_DSP_EVT_VM_CREATED:
-    case PET_DSP_EVT_VM_ADDED:
-        return prlsdkHandleVmAddedEvent(privconn, uuid);
-    case PET_DSP_EVT_VM_DELETED:
-    case PET_DSP_EVT_VM_UNREGISTERED:
-        return prlsdkHandleVmRemovedEvent(privconn, uuid);
-        break;
-    default:
-        virReportError(VIR_ERR_INTERNAL_ERROR,
-                       _("Can't handle event of type %d"), prlEventType);
-        return PRL_ERR_FAILURE;
+    ++privdom->cache.count;
+
+    if (privdom->cache.count > 3) {
+        job = PrlVm_UnsubscribeFromPerfStats(privdom->sdkdom);
+        if (PRL_FAILED(waitJob(job)))
+            goto cleanup;
+        privdom->cache.count = 0;
+        privdom->cache.stats = PRL_INVALID_HANDLE;
     }
 
- error:
-    return PRL_ERR_FAILURE;
+ cleanup:
+    if (dom)
+        virObjectUnlock(dom);
+
+    return PRL_ERR_SUCCESS;
 }
 
 static PRL_RESULT
@@ -1682,6 +1688,9 @@ prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque)
     PRL_RESULT pret = PRL_ERR_UNINITIALIZED;
     PRL_HANDLE_TYPE handleType;
     PRL_EVENT_TYPE prlEventType;
+    char uuidstr[VIR_UUID_STRING_BUFLEN + 2];
+    unsigned char uuid[VIR_UUID_BUFLEN];
+    PRL_UINT32 bufsize = ARRAY_CARDINALITY(uuidstr);
 
     pret = PrlHandle_GetType(prlEvent, &handleType);
     prlsdkCheckRetGoto(pret, cleanup);
@@ -1700,14 +1709,31 @@ prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque)
     PrlEvent_GetType(prlEvent, &prlEventType);
     prlsdkCheckRetGoto(pret, cleanup);
 
+    pret = PrlEvent_GetIssuerId(prlEvent, uuidstr, &bufsize);
+    prlsdkCheckRetGoto(pret, cleanup);
+
+    if (prlsdkUUIDParse(uuidstr, uuid) < 0)
+        goto cleanup;
+
     switch (prlEventType) {
     case PET_DSP_EVT_VM_STATE_CHANGED:
+        prlsdkHandleVmStateEvent(privconn, prlEvent, uuid);
+        break;
     case PET_DSP_EVT_VM_CONFIG_CHANGED:
+        prlsdkHandleVmConfigEvent(privconn, uuid);
+        break;
     case PET_DSP_EVT_VM_CREATED:
     case PET_DSP_EVT_VM_ADDED:
+        prlsdkHandleVmAddedEvent(privconn, uuid);
+        break;
     case PET_DSP_EVT_VM_DELETED:
     case PET_DSP_EVT_VM_UNREGISTERED:
-        pret = prlsdkHandleVmEvent(privconn, prlEvent);
+        prlsdkHandleVmRemovedEvent(privconn, uuid);
+        break;
+    case PET_DSP_EVT_VM_PERFSTATS:
+        prlsdkHandlePerfEvent(privconn, prlEvent, uuid);
+        // above function takes own of event
+        prlEvent = PRL_INVALID_HANDLE;
         break;
     default:
         VIR_DEBUG("Skipping event of type %d", prlEventType);
@@ -3455,3 +3481,108 @@ prlsdkDomainManagedSaveRemove(virDomainObjPtr dom)
 
     return 0;
 }
+
+static int
+prlsdkExtractStatsParam(PRL_HANDLE sdkstats, const char *name, long long *val)
+{
+    PRL_HANDLE param = PRL_INVALID_HANDLE;
+    PRL_RESULT pret;
+    PRL_INT64 pval = 0;
+    int ret = -1;
+
+    pret = PrlEvent_GetParamByName(sdkstats, name, &param);
+    if (pret == PRL_ERR_NO_DATA) {
+        *val = -1;
+        ret = 0;
+        goto cleanup;
+    } else if (PRL_FAILED(pret)) {
+        logPrlError(pret);
+        goto cleanup;
+    }
+    pret = PrlEvtPrm_ToInt64(param, &pval);
+    prlsdkCheckRetGoto(pret, cleanup);
+
+    *val = pval;
+    ret = 0;
+
+ cleanup:
+    PrlHandle_Free(param);
+    return ret;
+}
+
+static int
+prlsdkGetStatsParam(virDomainObjPtr dom, const char *name, long long *val)
+{
+    parallelsDomObjPtr privdom = dom->privateData;
+    PRL_HANDLE job = PRL_INVALID_HANDLE;
+
+    if (privdom->cache.stats != PRL_INVALID_HANDLE) {
+        privdom->cache.count = 0;
+        return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+    }
+
+    job = PrlVm_SubscribeToPerfStats(privdom->sdkdom, NULL);
+    if (PRL_FAILED(waitJob(job)))
+        goto error;
+
+    while (privdom->cache.stats == PRL_INVALID_HANDLE) {
+        if (virCondWait(&privdom->cache.cond, &dom->parent.lock) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("Unable to wait on monitor condition"));
+            goto error;
+        }
+    }
+
+    return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+ error:
+    return -1;
+}
+
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats)
+{
+    virDomainDeviceDriveAddressPtr address;
+    int idx;
+    const char *prefix;
+    int ret = -1;
+    char *name = NULL;
+
+    address = &disk->info.addr.drive;
+    switch (disk->bus) {
+    case VIR_DOMAIN_DISK_BUS_IDE:
+        prefix = "ide";
+        idx = address->bus * 2 + address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SATA:
+        prefix = "sata";
+        idx = address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SCSI:
+        prefix = "scsi";
+        idx = address->unit;
+        break;
+    default:
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("Unknown disk bus: %X"), disk->bus);
+        goto cleanup;
+    }
+
+
+#define PRLSDK_GET_STAT_PARAM(VAL, TYPE, NAME)                          \
+    if (virAsprintf(&name, "devices.%s%d.%s", prefix, idx, NAME) < 0)   \
+        goto cleanup;                                                   \
+    if (prlsdkGetStatsParam(dom, name, &stats->VAL) < 0)                \
+        goto cleanup;                                                   \
+    VIR_FREE(name);
+
+    PARALLELS_BLOCK_STATS_FOREACH(PRLSDK_GET_STAT_PARAM)
+
+#undef PRLSDK_GET_STAT_PARAM
+
+    ret = 0;
+
+ cleanup:
+
+    VIR_FREE(name);
+    return ret;
+}
diff --git a/src/parallels/parallels_sdk.h b/src/parallels/parallels_sdk.h
index 3f17fc8..afa6745 100644
--- a/src/parallels/parallels_sdk.h
+++ b/src/parallels/parallels_sdk.h
@@ -64,3 +64,5 @@ int
 prlsdkAttachVolume(virDomainObjPtr dom, virDomainDiskDefPtr disk);
 int
 prlsdkDetachVolume(virDomainObjPtr dom, virDomainDiskDefPtr disk);
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats);
diff --git a/src/parallels/parallels_utils.h b/src/parallels/parallels_utils.h
index 2d1d405..e424405 100644
--- a/src/parallels/parallels_utils.h
+++ b/src/parallels/parallels_utils.h
@@ -73,11 +73,20 @@ struct _parallelsConn {
 typedef struct _parallelsConn parallelsConn;
 typedef struct _parallelsConn *parallelsConnPtr;
 
+struct _parallelsContersCache {
+    PRL_HANDLE stats;
+    virCond cond;
+    int count;
+};
+
+typedef struct _parallelsContersCache parallelsContersCache;
+
 struct parallelsDomObj {
     int id;
     char *uuid;
     char *home;
     PRL_HANDLE sdkdom;
+    parallelsContersCache cache;
 };
 
 typedef struct parallelsDomObj *parallelsDomObjPtr;
@@ -106,4 +115,10 @@ virStorageVolPtr parallelsStorageVolLookupByPathLocked(virConnectPtr conn,
 int parallelsStorageVolDefRemove(virStoragePoolObjPtr privpool,
                                  virStorageVolDefPtr privvol);
 
+#define PARALLELS_BLOCK_STATS_FOREACH(OP)                             \
+    OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, "read_requests")      \
+    OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, "read_total")     \
+    OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, "write_requests")    \
+    OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, "write_total")
+
 #endif
-- 
1.7.1
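For context, a minimal client-side sketch of how the two new driver callbacks are reached through the public libvirt API; the two-call pattern for virDomainBlockStatsFlags (a first call with *nparams == 0 to learn the parameter count) matches the handling in parallelsDomainBlockStatsFlags above. The connection URI, domain name and disk target used here are illustrative assumptions only.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    /* URI, domain name and disk target are illustrative; adjust for a real setup. */
    virConnectPtr conn = virConnectOpen("parallels:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "myvm") : NULL;
    virDomainBlockStatsStruct st;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int i;

    if (!dom)
        goto out;

    /* Aggregate stats over all disks: an empty path goes through the
     * summing branch of parallelsDomainBlockStats. */
    if (virDomainBlockStats(dom, "", &st, sizeof(st)) == 0)
        printf("rd_req=%lld wr_req=%lld\n", st.rd_req, st.wr_req);

    /* Typed-parameter variant: the first call reports how many parameters
     * are available, the second call fills them in. */
    if (virDomainBlockStatsFlags(dom, "sda", NULL, &nparams, 0) == 0 &&
        nparams > 0) {
        params = calloc(nparams, sizeof(*params));
        if (params &&
            virDomainBlockStatsFlags(dom, "sda", params, &nparams, 0) == 0) {
            for (i = 0; i < nparams; i++)
                printf("%s = %lld\n", params[i].field, params[i].value.l);
        }
        free(params);
    }

 out:
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return 0;
}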

On 05/22/2015 10:42 AM, Nikolay Shirokovskiy wrote:
Statistics are provided through the PCS SDK. As the SDK offers only an asynchronous interface, we need to be subscribed to statistics in order to get them. The trivial solution of subscribing on every stats request, waiting for the event and then unsubscribing would lead to significant delays for a series of successive requests, because the event is only delivered on the next PCS server notify cycle. On the other hand, we don't want to keep an unnecessary subscription. So we take a hybrid approach: subscribe on the first request and then keep the subscription while requests are active. We populate a cache of statistics from subscription events and use this cache to serve libvirt's requests.

Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy@parallels.com>
---
 src/parallels/parallels_driver.c |  106 +++++++++++++++++++++
 src/parallels/parallels_sdk.c    |  193 ++++++++++++++++++++++++++++++++------
 src/parallels/parallels_sdk.h    |    2 +
 src/parallels/parallels_utils.h  |   15 +++
 4 files changed, 285 insertions(+), 31 deletions(-)
diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c
index 4b87213..ce59e00 100644
--- a/src/parallels/parallels_driver.c
+++ b/src/parallels/parallels_driver.c
....
+static int
+parallelsDomainBlockStats(virDomainPtr domain, const char *path,
+                          virDomainBlockStatsPtr stats)
+{
....
+    } else {
+        virDomainBlockStatsStruct s;
+
+#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME)   \
+        stats->VAR = 0;
+
+        PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS)
+
+#undef PARALLELS_ZERO_STATS
+
Why don't you use memset here?
+        for (i = 0; i < dom->def->ndisks; i++) {
+            if (prlsdkGetBlockStats(dom, dom->def->disks[i], &s) < 0)
+                goto cleanup;
+
+#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)    \
+            if (s.VAR != -1)                    \
+                stats->VAR += s.VAR;
+
+            PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS)
+
+#undef PARALLELS_SUM_STATS
+        }
+    }
....
@@ -1228,6 +1332,8 @@ static virHypervisorDriver parallelsDriver = {
     .domainManagedSave = parallelsDomainManagedSave, /* 1.2.14 */
     .domainManagedSaveRemove = parallelsDomainManagedSaveRemove, /* 1.2.14 */
     .domainGetMaxMemory = parallelsDomainGetMaxMemory, /* 1.2.15 */
+    .domainBlockStats = parallelsDomainBlockStats, /* 1.2.16 */
+    .domainBlockStatsFlags = parallelsDomainBlockStatsFlags, /* 1.2.16 */
Could you please update these versions to 1.2.17?
};
 static virConnectDriver parallelsConnectDriver = {

diff --git a/src/parallels/parallels_sdk.c b/src/parallels/parallels_sdk.c
index 88ad59b..eb8d965 100644
--- a/src/parallels/parallels_sdk.c
+++ b/src/parallels/parallels_sdk.c
@@ -21,6 +21,7 @@
  */
....
+        goto cleanup;
+
     switch (prlEventType) {
     case PET_DSP_EVT_VM_STATE_CHANGED:
+        prlsdkHandleVmStateEvent(privconn, prlEvent, uuid);
+        break;
     case PET_DSP_EVT_VM_CONFIG_CHANGED:
+        prlsdkHandleVmConfigEvent(privconn, uuid);
+        break;
     case PET_DSP_EVT_VM_CREATED:
     case PET_DSP_EVT_VM_ADDED:
+        prlsdkHandleVmAddedEvent(privconn, uuid);
+        break;
     case PET_DSP_EVT_VM_DELETED:
     case PET_DSP_EVT_VM_UNREGISTERED:
-        pret = prlsdkHandleVmEvent(privconn, prlEvent);
+        prlsdkHandleVmRemovedEvent(privconn, uuid);
+        break;
+    case PET_DSP_EVT_VM_PERFSTATS:
+        prlsdkHandlePerfEvent(privconn, prlEvent, uuid);
+        // above function takes own of event
+        prlEvent = PRL_INVALID_HANDLE;
         break;
     default:
         VIR_DEBUG("Skipping event of type %d", prlEventType);
@@ -3455,3 +3481,108 @@ prlsdkDomainManagedSaveRemove(virDomainObjPtr dom)

     return 0;
 }
+
Could you describe the idea behind the stats cache in more detail? Why do you keep up to 3 prlsdk stats handles, but use only one? And where do you free these handles?
....

+static int
+prlsdkGetStatsParam(virDomainObjPtr dom, const char *name, long long *val)
+{
+    parallelsDomObjPtr privdom = dom->privateData;
+    PRL_HANDLE job = PRL_INVALID_HANDLE;
+
+    if (privdom->cache.stats != PRL_INVALID_HANDLE) {
+        privdom->cache.count = 0;
+        return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+    }
+
+    job = PrlVm_SubscribeToPerfStats(privdom->sdkdom, NULL);
+    if (PRL_FAILED(waitJob(job)))
+        goto error;
+
+    while (privdom->cache.stats == PRL_INVALID_HANDLE) {
+        if (virCondWait(&privdom->cache.cond, &dom->parent.lock) < 0) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("Unable to wait on monitor condition"));
+            goto error;
+        }
+    }
+
+    return prlsdkExtractStatsParam(privdom->cache.stats, name, val);
+ error:
+    return -1;
+}
+
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats)
+{
+    virDomainDeviceDriveAddressPtr address;
+    int idx;
+    const char *prefix;
+    int ret = -1;
+    char *name = NULL;
+
+    address = &disk->info.addr.drive;
+    switch (disk->bus) {
+    case VIR_DOMAIN_DISK_BUS_IDE:
+        prefix = "ide";
+        idx = address->bus * 2 + address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SATA:
+        prefix = "sata";
+        idx = address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SCSI:
+        prefix = "scsi";
+        idx = address->unit;
+        break;
+    default:
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("Unknown disk bus: %X"), disk->bus);
+        goto cleanup;
+    }
+
I think this won't work if the domain has disks on different buses.
+
+#define PRLSDK_GET_STAT_PARAM(VAL, TYPE, NAME)                          \
+    if (virAsprintf(&name, "devices.%s%d.%s", prefix, idx, NAME) < 0)   \
+        goto cleanup;                                                   \
+    if (prlsdkGetStatsParam(dom, name, &stats->VAL) < 0)                \
+        goto cleanup;                                                   \
+    VIR_FREE(name);
+
+    PARALLELS_BLOCK_STATS_FOREACH(PRLSDK_GET_STAT_PARAM)
+
+#undef PRLSDK_GET_STAT_PARAM
+
+    ret = 0;
+
+ cleanup:
+
+    VIR_FREE(name);
+    return ret;
+}
....
+#define PARALLELS_BLOCK_STATS_FOREACH(OP)                             \
+    OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, "read_requests")      \
+    OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, "read_total")     \
+    OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, "write_requests")    \
+    OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, "write_total")
+
 #endif

On 03.06.2015 15:16, Dmitry Guryanov wrote:
On 05/22/2015 10:42 AM, Nikolay Shirokovskiy wrote:
Statistics are provided through the PCS SDK. As the SDK offers only an asynchronous interface, we need to be subscribed to statistics in order to get them. The trivial solution of subscribing on every stats request, waiting for the event and then unsubscribing would lead to significant delays for a series of successive requests, because the event is only delivered on the next PCS server notify cycle. On the other hand, we don't want to keep an unnecessary subscription. So we take a hybrid approach: subscribe on the first request and then keep the subscription while requests are active. We populate a cache of statistics from subscription events and use this cache to serve libvirt's requests.

Signed-off-by: Nikolay Shirokovskiy <nshirokovskiy@parallels.com>
---
 src/parallels/parallels_driver.c |  106 +++++++++++++++++++++
 src/parallels/parallels_sdk.c    |  193 ++++++++++++++++++++++++++++++++------
 src/parallels/parallels_sdk.h    |    2 +
 src/parallels/parallels_utils.h  |   15 +++
 4 files changed, 285 insertions(+), 31 deletions(-)
....
+    } else {
+        virDomainBlockStatsStruct s;
+
+#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME)   \
+        stats->VAR = 0;
+
+        PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS)
+
+#undef PARALLELS_ZERO_STATS
+
Why don't you use memset here?

To keep the code uniform. I use the PARALLELS_BLOCK_STATS_FOREACH macro to iterate over all the disk stats PCS supports in several places; here I use the same macro to make the initialization explicit rather than relying on an implicit memset.
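For readers following the discussion, this is roughly what the two macro invocations above expand to, written out by hand from the PARALLELS_BLOCK_STATS_FOREACH definition at the end of the patch (illustration only, not actual preprocessor output):

/* PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS) expands, field by
 * field, into explicit assignments rather than a byte-wise memset: */
stats->rd_req = 0;
stats->rd_bytes = 0;
stats->wr_req = 0;
stats->wr_bytes = 0;

/* ...and PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS) becomes: */
if (s.rd_req != -1)
    stats->rd_req += s.rd_req;
if (s.rd_bytes != -1)
    stats->rd_bytes += s.rd_bytes;
if (s.wr_req != -1)
    stats->wr_req += s.wr_req;
if (s.wr_bytes != -1)
    stats->wr_bytes += s.wr_bytes;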
+
 static virHypervisorDriver parallelsDriver = {
     .name = "Parallels",
     .connectOpen = parallelsConnectOpen, /* 0.10.0 */
@@ -1228,6 +1332,8 @@ static virHypervisorDriver parallelsDriver = {
     .domainManagedSave = parallelsDomainManagedSave, /* 1.2.14 */
     .domainManagedSaveRemove = parallelsDomainManagedSaveRemove, /* 1.2.14 */
     .domainGetMaxMemory = parallelsDomainGetMaxMemory, /* 1.2.15 */
+    .domainBlockStats = parallelsDomainBlockStats, /* 1.2.16 */
+    .domainBlockStatsFlags = parallelsDomainBlockStatsFlags, /* 1.2.16 */
Could you please update these versions to 1.2.17?
};
ok
 static virConnectDriver parallelsConnectDriver = {

diff --git a/src/parallels/parallels_sdk.c b/src/parallels/parallels_sdk.c
index 88ad59b..eb8d965 100644
--- a/src/parallels/parallels_sdk.c
+++ b/src/parallels/parallels_sdk.c
@@ -21,6 +21,7 @@
  */
....
+        goto cleanup;
+
     switch (prlEventType) {
     case PET_DSP_EVT_VM_STATE_CHANGED:
+        prlsdkHandleVmStateEvent(privconn, prlEvent, uuid);
+        break;
     case PET_DSP_EVT_VM_CONFIG_CHANGED:
+        prlsdkHandleVmConfigEvent(privconn, uuid);
+        break;
     case PET_DSP_EVT_VM_CREATED:
     case PET_DSP_EVT_VM_ADDED:
+        prlsdkHandleVmAddedEvent(privconn, uuid);
+        break;
     case PET_DSP_EVT_VM_DELETED:
     case PET_DSP_EVT_VM_UNREGISTERED:
-        pret = prlsdkHandleVmEvent(privconn, prlEvent);
+        prlsdkHandleVmRemovedEvent(privconn, uuid);
+        break;
+    case PET_DSP_EVT_VM_PERFSTATS:
+        prlsdkHandlePerfEvent(privconn, prlEvent, uuid);
+        // above function takes own of event
+        prlEvent = PRL_INVALID_HANDLE;
         break;
     default:
         VIR_DEBUG("Skipping event of type %d", prlEventType);
@@ -3455,3 +3481,108 @@ prlsdkDomainManagedSaveRemove(virDomainObjPtr dom)

     return 0;
 }
+
Could you describe the idea behind the stats cache in more detail? Why do you keep up to 3 prlsdk stats handles, but use only one? And where do you free these handles?
OK, this will go into the commit message. In short:

1. The value 3 is an implicit timeout for dropping the cache; explicitly it is 3 * PCS_INTERVAL_BETWEEN_STAT_EVENTS. If nobody asks for statistics during this interval, we unsubscribe and invalidate the cache, so if somebody wants statistics again we will have to subscribe again and wait for the first event to arrive before proceeding.
2. An event handle is freed when the next event arrives.
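To make the described life cycle easier to follow, here is a small self-contained model of it in plain C. It is a hypothetical illustration, not code from the patch: the struct and helper names are invented, SDK calls are replaced by booleans, and the condition-variable wait is reduced to a comment.

#include <stdio.h>
#include <stdbool.h>

struct stats_cache {
    bool subscribed;   /* models the PCS perf-stats subscription        */
    bool have_event;   /* models cache.stats != PRL_INVALID_HANDLE      */
    int  count;        /* notify cycles since the last consumer request */
};

/* Consumer side: corresponds to prlsdkGetStatsParam(). */
static void request_stats(struct stats_cache *c)
{
    if (c->have_event) {
        c->count = 0;                 /* cache hit: keep the subscription alive */
        printf("served from cache\n");
        return;
    }
    c->subscribed = true;             /* first request: subscribe ...           */
    printf("subscribed, waiting for first event\n");
    /* ... and block on the condition until the first event arrives. */
}

/* Event side: corresponds to prlsdkHandlePerfEvent(), one call per notify cycle. */
static void perf_event(struct stats_cache *c)
{
    if (!c->subscribed)
        return;
    c->have_event = true;             /* cache the freshly delivered event       */
    if (++c->count > 3) {             /* ~3 idle cycles, i.e. roughly
                                       * 3 * PCS_INTERVAL_BETWEEN_STAT_EVENTS:
                                       * drop the subscription                   */
        c->subscribed = false;
        c->have_event = false;
        c->count = 0;
        printf("idle, unsubscribed\n");
    }
}

int main(void)
{
    struct stats_cache c = {0};
    request_stats(&c);                /* subscribe                               */
    perf_event(&c);                   /* first event populates the cache         */
    request_stats(&c);                /* served from cache, idle counter reset   */
    perf_event(&c); perf_event(&c); perf_event(&c); perf_event(&c);
    /* four idle notify cycles in a row -> unsubscribed */
    return 0;
}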
+int
+prlsdkGetBlockStats(virDomainObjPtr dom, virDomainDiskDefPtr disk, virDomainBlockStatsPtr stats)
+{
+    virDomainDeviceDriveAddressPtr address;
+    int idx;
+    const char *prefix;
+    int ret = -1;
+    char *name = NULL;
+
+    address = &disk->info.addr.drive;
+    switch (disk->bus) {
+    case VIR_DOMAIN_DISK_BUS_IDE:
+        prefix = "ide";
+        idx = address->bus * 2 + address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SATA:
+        prefix = "sata";
+        idx = address->unit;
+        break;
+    case VIR_DOMAIN_DISK_BUS_SCSI:
+        prefix = "scsi";
+        idx = address->unit;
+        break;
+    default:
+        virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("Unknown disk bus: %X"), disk->bus);
+        goto cleanup;
+    }
+
I think this won't work if the domain has disks on different buses.
No, it's correct, as I use the prefix as well.
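For illustration, a tiny standalone snippet showing the parameter names the virAsprintf() format string in the patch produces for a few hypothetical disk addresses; devices on different buses stay distinct through the prefix even when their computed indexes coincide (whether every such counter is exported by PCS for each bus type is outside the scope of this example):

#include <stdio.h>

int main(void)
{
    /* Hypothetical disk addresses; the format string mirrors the patch. */
    printf("devices.%s%d.%s\n", "ide", 0 * 2 + 1, "read_requests");  /* IDE, bus 0, unit 1 */
    printf("devices.%s%d.%s\n", "scsi", 1, "read_requests");         /* SCSI, unit 1 */
    printf("devices.%s%d.%s\n", "sata", 0, "write_total");           /* SATA, unit 0 */
    /* -> devices.ide1.read_requests
     *    devices.scsi1.read_requests   (same index, distinct prefix)
     *    devices.sata0.write_total */
    return 0;
}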
Meanwhile I have discovered the following problems in this patch, which will be fixed in the next version:

1. This patch includes refactorings of the event handler function that should be done in a different way.
2. We don't keep a reference to the domain while waiting for the event, so we could end up with a wild pointer after the wait finishes.
3. We don't have a timeout when waiting for the event.

All of these will be fixed in the next version.
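For points 2 and 3, one possible direction, sketched with existing libvirt primitives (virObjectRef/virObjectUnref, virTimeMillisNow and virCondWaitUntil); this is only an illustrative sketch, not the actual follow-up patch. The function name and the JOB_WAIT_MS constant are invented here, and the perf-stats subscription step from the original prlsdkGetStatsParam() is assumed to have already happened.

#define JOB_WAIT_MS (1000ull * 60)   /* invented timeout value, for illustration */

static int
prlsdkGetStatsParamSafe(virDomainObjPtr dom, const char *name, long long *val)
{
    parallelsDomObjPtr privdom = dom->privateData;
    unsigned long long deadline;
    int ret = -1;

    virObjectRef(dom);                       /* point 2: keep the domain object
                                              * alive while we sleep on the
                                              * condition variable */

    if (virTimeMillisNow(&deadline) < 0)
        goto cleanup;
    deadline += JOB_WAIT_MS;                 /* point 3: bounded wait */

    while (privdom->cache.stats == PRL_INVALID_HANDLE) {
        if (virCondWaitUntil(&privdom->cache.cond, &dom->parent.lock,
                             deadline) < 0) {
            virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
                           _("timed out waiting for statistics event"));
            goto cleanup;
        }
    }

    ret = prlsdkExtractStatsParam(privdom->cache.stats, name, val);

 cleanup:
    virObjectUnref(dom);
    return ret;
}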