[libvirt] [PATCH 1/1] XenAPI remote storage support on libvirt
by Sharadha Prabhakar (3P)
This patch adds the APIs supporting XenAPI remote storage on libvirt.
This patch allows you to list storage pools, storage volumes, get information
about storage pools and volumes and create storage pools of type NETFS
with format type nfs,cifs-iso,nfs-iso using virsh. You can also create
VMs with storage pools attached and destroy storage pools.
While creating a VM with storage, the disk tag's source element should be
of the form '/storage pool uuid/storage volume uuid'.
--- ./libvirt_org/src/xenapi/xenapi_storage_driver.c 1970-01-01 01:00:00.000000000 +0100
+++ ./libvirt/src/xenapi/xenapi_storage_driver.c 2010-03-24 15:27:43.000000000 +0000
@@ -0,0 +1,1499 @@
+/*
+ * xenapi_storage_driver.c: Xen API storage driver APIs
+ * Copyright (C) 2009, 2010 Citrix Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Sharadha Prabhakar <sharadha.prabhakar(a)citrix.com>
+ */
+
+#include <config.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <libxml/uri.h>
+#include <xen_internal.h>
+#include <libxml/parser.h>
+#include <curl/curl.h>
+#include <xen/api/xen_common.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_all.h>
+#include <xen/api/xen_vm_metrics.h>
+#include <xen/api/xen_api_failure.h>
+#include <xen/dom0_ops.h>
+
+#include "libvirt_internal.h"
+#include "libvirt/libvirt.h"
+#include "virterror_internal.h"
+#include "storage_conf.h"
+#include "datatypes.h"
+#include "xenapi_driver.h"
+#include "util.h"
+#include "uuid.h"
+#include "authhelper.h"
+#include "memory.h"
+#include "driver.h"
+#include "util/logging.h"
+#include "buf.h"
+#include "xenapi_utils.h"
+#include "xenapi_storage_driver.h"
+
+/*
+*XenapiStorageOpen
+*
+*Authenticates and creates a session with the server
+*Returns VIR_DRV_OPEN_SUCCESS on success, else VIR_DRV_OPEN_ERROR
+*/
+static virDrvOpenStatus
+xenapiStorageOpen (virConnectPtr conn, virConnectAuthPtr auth, int flags ATTRIBUTE_UNUSED)
+{
+ char *username = NULL;
+ char *password = NULL;
+ struct _xenapiStoragePrivate *privP = NULL;
+
+ if (conn->uri == NULL || conn->uri->scheme == NULL ||
+ STRCASENEQ(conn->uri->scheme, "XenAPI")) {
+ return VIR_DRV_OPEN_DECLINED;
+ }
+
+ if (conn->uri->server == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Server name not in URI");
+ goto error;
+ }
+
+ if (auth == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Authentication Credentials not found");
+ goto error;
+ }
+
+ if (conn->uri->user != NULL) {
+ username = strdup(conn->uri->user);
+
+ if (username == NULL) {
+ virReportOOMError();
+ goto error;
+ }
+ } else {
+ username = virRequestUsername(auth, NULL, conn->uri->server);
+
+ if (username == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Username request failed");
+ goto error;
+ }
+ }
+
+ password = virRequestPassword(auth, username, conn->uri->server);
+
+ if (password == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Password request failed");
+ goto error;
+ }
+
+ if (VIR_ALLOC(privP) < 0) {
+ virReportOOMError();
+ goto error;
+ }
+
+ if (virAsprintf(&privP->url, "https://%s", conn->uri->server) < 0) {
+ virReportOOMError();
+ goto error;
+ }
+
+ if (xenapiUtil_ParseQuery(conn, conn->uri, &privP->noVerify) < 0)
+ goto error;
+
+ xmlInitParser();
+ xmlKeepBlanksDefault(0);
+ xen_init();
+ curl_global_init(CURL_GLOBAL_ALL);
+
+ privP->session = xen_session_login_with_password(call_func, privP, username,
+ password, xen_api_latest_version);
+
+ if (privP->session != NULL && privP->session->ok) {
+ conn->storagePrivateData = privP;
+ VIR_FREE(username);
+ VIR_FREE(password);
+ return VIR_DRV_OPEN_SUCCESS;
+ }
+
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED, "");
+
+ error:
+ VIR_FREE(username);
+ VIR_FREE(password);
+
+ if (privP != NULL) {
+ if (privP->session != NULL)
+ xenSessionFree(privP->session);
+
+ VIR_FREE(privP->url);
+ VIR_FREE(privP);
+ }
+
+ return VIR_DRV_OPEN_ERROR;
+}
+
+
+/*
+*XenapiStorageClose
+*
+*Closes the session with the server
+*Returns 0 on success
+*/
+static int
+xenapiStorageClose (virConnectPtr conn)
+{
+ struct _xenapiStoragePrivate *priv = (struct _xenapiStoragePrivate *)conn->storagePrivateData;
+ xen_session_logout(priv->session);
+ VIR_FREE(priv->url);
+ VIR_FREE(priv);
+ return 0;
+
+}
+
+/*
+*XenapiNumOfStoragePools
+*
+*Provides the number of active storage pools
+*Returns number of pools found on success, or -1 on error
+*/
+static int
+xenapiNumOfStoragePools (virConnectPtr conn)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ bool currently_attached;
+ int cnt=-1,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; i<sr_set->size; i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 1) cnt++;
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return cnt;
+}
+
+/*
+*XenapiListStoragePools
+*
+*Provides the list of names of active storage pools up to maxnames
+*Returns the number of names in the list on success, or -1 on error
+*/
+static int
+xenapiListStoragePools (virConnectPtr conn, char **const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ char *usenames=NULL;
+ bool currently_attached;
+ int count=0,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; (i<sr_set->size) && (count<maxnames); i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 1) {
+ if(!(usenames = strdup(record->name_label))) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ names[count++] = usenames;
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ goto cleanup;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+ cleanup:
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ while (--count>=0) VIR_FREE(names[count]);
+ return -1;
+}
+
+
+/*
+*XenapiListDefinedStoragePools
+*
+*Provides the list of names of inactive storage pools up to maxnames
+*
+*/
+static int
+xenapiListDefinedStoragePools (virConnectPtr conn, char **const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ char *usenames=NULL;
+ bool currently_attached;
+ int count=0,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; (i<sr_set->size) && (count<maxnames); i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 0) {
+ if(!(usenames = strdup(record->name_label))) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ names[count++] = usenames;
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ goto cleanup;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+
+ cleanup:
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ while (--count>=0) free(names[count]);
+ return -1;
+}
+
+
+/*
+*XenapiNumOfDefinedStoragePools
+*
+*Provides the number of inactive storage pools
+*
+*/
+static int
+xenapiNumOfDefinedStoragePools (virConnectPtr conn)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ int cnt=-1,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; i<sr_set->size; i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ bool currently_attached;
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 0) cnt++;
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return cnt;
+}
+
+/*
+*XenapiStoragePoolCreateXML
+*
+*Creates a Storage Pool from the given XML
+* Only storage pool type NETFS is supported for now
+*/
+static virStoragePoolPtr
+xenapiStoragePoolCreateXML (virConnectPtr conn, const char *xmlDesc,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ virStoragePoolDefPtr pdef = NULL;
+ char *pooltype=NULL;
+ xen_sr sr=NULL;
+ xen_host host=NULL;
+ virBuffer path = VIR_BUFFER_INITIALIZER;
+ xen_string_string_map *device_config=NULL,*smconfig=NULL;
+ virStoragePoolPtr poolPtr = NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_sr_record *sr_record = NULL;
+ xen_session *session = ((struct _xenapiPrivate *)(conn->privateData))->session;
+ if(!(pdef = virStoragePoolDefParseString(xmlDesc))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't parse XML");
+ virBufferFreeAndReset(&path);
+ return NULL;
+ }
+ if (pdef->type != VIR_STORAGE_POOL_NETFS) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Only Pool type NETFS is currently supported");
+ goto cleanup;
+ } else {
+ if (pdef->source.format == VIR_STORAGE_POOL_NETFS_NFS_ISO) {
+ pooltype = (char *)"iso";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Host name required for creating NFS ISO SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating NFS ISO SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(1);
+ if (!(device_config->contents[0].key = strdup("location"))) {
+ goto cleanup_device_config;
+ }
+ virBufferVSprintf(&path,"%s:%s",pdef->source.host.name, pdef->source.dir);
+ device_config->contents[0].val = virBufferContentAndReset(&path);
+ smconfig = xen_string_string_map_alloc(0);
+ }
+ else if (pdef->source.format == VIR_STORAGE_POOL_NETFS_CIFS_ISO) {
+ pooltype = (char *)"iso";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Host name required for creating CIFS ISO SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating CIFS ISO SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(1);
+ if (!(device_config->contents[0].key = strdup("location")))
+ goto cleanup_device_config;
+
+ if (pdef->source.host.name[0] != '/') {
+ virBufferVSprintf(&path,"//%s%s",pdef->source.host.name, pdef->source.dir);
+ }
+ else {
+ virBufferVSprintf(&path,"%s%s",pdef->source.host.name, pdef->source.dir);
+ }
+ device_config->contents[0].val = virBufferContentAndReset(&path);
+ smconfig = xen_string_string_map_alloc(1);
+ if (!(smconfig->contents[0].key = strdup("iso_type"))) {
+ xen_string_string_map_free(smconfig);
+ xen_string_string_map_free(device_config);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ if (!(smconfig->contents[0].val = strdup("cifs"))) {
+ xen_string_string_map_free(smconfig);
+ xen_string_string_map_free(device_config);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ }
+ else if (pdef->source.format == VIR_STORAGE_POOL_NETFS_NFS) {
+ pooltype = (char *)"nfs";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Server name required for creating NFS SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating NFS SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(2);
+ if (!(device_config->contents[0].key = strdup("server")))
+ goto cleanup_device_config;
+ if (!(device_config->contents[0].val = strdup(pdef->source.host.name)))
+ goto cleanup_device_config;
+ if (!(device_config->contents[1].key = strdup("serverpath")))
+ goto cleanup_device_config;
+ if (!(device_config->contents[1].val = strdup(pdef->source.dir)))
+ goto cleanup_device_config;
+ smconfig = xen_string_string_map_alloc(0);
+ virBufferFreeAndReset(&path);
+ }
+ else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Format type of NETFS not supported by the hypervisor");
+ goto cleanup;
+ }
+ }
+ if (!xen_session_get_this_host(session, &host, session)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ if (!xen_sr_create(session, &sr, host, device_config, 0, pdef->name, (char *)"",
+ pooltype, (char *) "iso", true, smconfig)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ xen_host_free(host);
+ return NULL;
+ }
+ if (!xen_sr_get_record(session, &sr_record, sr)){
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ xen_host_free(host);
+ xen_sr_free(sr);
+ return NULL;
+ }
+ virUUIDParse(sr_record->uuid,raw_uuid);
+ poolPtr = virGetStoragePool(conn,(const char *)sr_record->name_label,raw_uuid);
+ if (!poolPtr) xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get a valid storage pool pointer");
+ virStoragePoolDefFree(pdef);
+ xen_sr_record_free(sr_record);
+ xen_host_free(host);
+ return poolPtr;
+
+ cleanup_device_config:
+ xen_string_string_map_free(device_config);
+
+ cleanup:
+ virStoragePoolDefFree(pdef);
+ virBufferFreeAndReset(&path);
+ return NULL;
+}
+
+static int
+xenapiStoragePoolBuild (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ return 0; /* return SUCCESS for now */
+}
+
+
+static int
+xenapiStoragePoolCreate (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+
+/*
+*XenapiStoragePoolSetAutostart
+*
+*Autostart option is always ON by default and is not allowed to be OFF
+*
+*/
+static int
+xenapiStoragePoolSetAutostart (virStoragePoolPtr pool, int autostart)
+{
+ virConnectPtr conn = pool->conn;
+ if (autostart == 1) {
+ VIR_DEBUG0("XenAPI storage pools autostart option is always ON by default");
+ return 0;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Hypervisor doesn't allow autostart to be OFF");
+ return -1;
+ }
+}
+
+
+/*
+*XenapiStoragePoolGetAutostart
+*
+*Returns the storage pool autostart option. Which is always ON
+*
+*/
+static int
+xenapiStoragePoolGetAutostart (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ int * autostart)
+{
+ *autostart=1; /* XenAPI storage pools always have autostart set to ON */
+ return 0;
+}
+
+
+/*
+*XenapiStoragePoolLookupByName
+*
+* storage pool based on its unique name
+*
+*/
+static virStoragePoolPtr
+xenapiStoragePoolLookupByName (virConnectPtr conn,
+ const char * name)
+{
+ virStoragePoolPtr poolPtr=NULL;
+ xen_sr_record *record=NULL;
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_by_name_label(session, &sr_set, (char *)name) && sr_set->size>0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ sr = sr_set->contents[0];
+ if (!xen_sr_get_record(session, &record, sr)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ virUUIDParse(record->uuid,raw_uuid);
+ if (!(poolPtr = virGetStoragePool(conn,name,raw_uuid)))
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool pointer not available");
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return poolPtr;
+}
+
+
+/*
+*XenapiStoragePoolGetXMLDesc
+*
+*Returns the configuration of a storage pool as XML
+*
+*/
+static char *
+xenapiStoragePoolGetXMLDesc (virStoragePoolPtr pool,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ xen_sr_record *record=NULL;
+ xen_sr sr=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_pbd pbd=NULL;
+ char *pathDetails = NULL, *host=NULL, *path=NULL,*xml=NULL;
+ virConnectPtr conn = pool->conn;
+ virStoragePoolDefPtr pdef=NULL;
+ xen_string_string_map *smconfig=NULL;
+ bool cifs;
+ xen_string_string_map *deviceConfig=NULL;
+ char uuidStr[VIR_UUID_STRING_BUFLEN];
+ int i;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuidStr);
+ if (xen_sr_get_by_uuid(session, &sr, uuidStr)) {
+ if (!xen_sr_get_record(session, &record, sr)) {
+ xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get SR information");
+ return NULL;
+ }
+ if (VIR_ALLOC(pdef)<0) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ return NULL;
+ }
+ if (STREQ(record->type,"nfs") || STREQ(record->type,"iso"))
+ pdef->type = VIR_STORAGE_POOL_NETFS;
+ else if(STREQ(record->type,"iscsi"))
+ pdef->type = VIR_STORAGE_POOL_ISCSI;
+ else if(STREQ(record->type,"file"))
+ pdef->type = VIR_STORAGE_POOL_DIR;
+ else if(STREQ(record->type,"lvm"))
+ pdef->type = VIR_STORAGE_POOL_LOGICAL;
+ else if(STREQ(record->type,"ext")) {
+ pdef->type = VIR_STORAGE_POOL_FS;
+ pdef->source.format = VIR_STORAGE_POOL_FS_EXT3;
+ }
+ else if(STREQ(record->type,"hba"))
+ pdef->type = VIR_STORAGE_POOL_SCSI;
+
+ if (!(pdef->name = strdup(record->name_label))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ virUUIDParse(record->uuid,pdef->uuid);
+ pdef->allocation = (record->virtual_allocation)/1024;
+ pdef->capacity = (record->physical_size)/1024;
+ pdef->available = (record->physical_size - record->physical_utilisation)/1024;
+
+ if (STREQ(record->type,"iso")) {
+ if (xen_sr_get_sm_config(session, &smconfig, sr)){
+ cifs = false;
+ for (i=0;i<smconfig->size;i++){
+ if (STREQ(smconfig->contents[i].key,"iso_type")
+ && STREQ(smconfig->contents[i].val, "cifs"))
+ cifs = true;
+ break;
+ }
+ xen_string_string_map_free(smconfig);
+ xen_sr_get_pbds (session, &pbd_set, sr);
+ pbd = pbd_set->contents[0];
+ xen_pbd_get_device_config(session, &deviceConfig, pbd);
+ if (deviceConfig) {
+ for (i=0;i<deviceConfig->size;i++) {
+ if(STREQ(deviceConfig->contents[i].key,"location")) {
+ if (!(pathDetails = strdup(deviceConfig->contents[i].val))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ xen_string_string_map_free(deviceConfig);
+ xen_pbd_set_free(pbd_set);
+ return NULL;
+ }
+ break;
+ }
+ }
+ xen_string_string_map_free(deviceConfig);
+ xen_pbd_set_free(pbd_set);
+ }
+ if (pathDetails) {
+ if (VIR_ALLOC_N(host,strlen(pathDetails)) <0) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ if (VIR_ALLOC_N(path,strlen(pathDetails)) <0) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(host);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ host[0]='\0';path[0]='\0';
+ if (cifs) {
+ pdef->source.format = VIR_STORAGE_POOL_NETFS_CIFS_ISO;
+ sscanf(pathDetails,"//%[^/]%s",host,path);
+ } else {
+ pdef->source.format = VIR_STORAGE_POOL_NETFS_NFS_ISO;
+ sscanf(pathDetails,"%[^:]:%s",host,path);
+ }
+ if (STRNEQ(host,"\0")) {
+ if (!(pdef->source.host.name = strdup(host))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(host);
+ VIR_FREE(path);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ }
+ if (STRNEQ(path,"\0")) {
+ if (!(pdef->source.dir = strdup(path))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(host);
+ VIR_FREE(path);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ }
+ VIR_FREE(host);
+ VIR_FREE(path);
+ VIR_FREE(pathDetails);
+ }
+ }
+ }
+ if (!(pdef->target.path = strdup("/"))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ xen_sr_record_free(record);
+ xml = virStoragePoolDefFormat(pdef);
+ virStoragePoolDefFree(pdef);
+ if (!xml)
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't convert to XML format");
+ return xml;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return NULL;
+ }
+}
+
+
+/*
+*XenapiStoragePoolNumOfVolumes
+*
+*Fetch the number of storage volumes within a pool
+*
+*/
+static int
+xenapiStoragePoolNumOfVolumes (virStoragePoolPtr pool ATTRIBUTE_UNUSED)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ xen_vdi_set *vdi_set=NULL;
+ virConnectPtr conn = pool->conn;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ int count=0;
+ if (xen_sr_get_by_name_label(session, &sr_set, pool->name) && sr_set->size!=0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool Name is not unique");
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ sr = sr_set->contents[0];
+ if (xen_sr_get_vdis(session, &vdi_set, sr) && vdi_set->size!=0) {
+ count = vdi_set->size;
+ xen_sr_set_free(sr_set);
+ xen_vdi_set_free(vdi_set);
+ return count;
+ } else {
+ if (vdi_set) {
+ xen_vdi_set_free(vdi_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Volume not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return -1;
+ }
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+}
+
+
+/*
+*XenapiStoragePoolListVolumes
+*
+*Fetch list of storage volume names, limiting to at most maxnames.
+*
+*/
+static int
+xenapiStoragePoolListVolumes (virStoragePoolPtr pool, char ** const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ xen_vdi_set *vdi_set=NULL;
+ xen_vdi vdi=NULL;
+ int count,i;
+ char *usenames = NULL;
+ virConnectPtr conn=pool->conn;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_by_name_label(session, &sr_set, pool->name) && sr_set->size>0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ sr = sr_set->contents[0];
+ if (xen_sr_get_vdis(session, &vdi_set, sr) && vdi_set->size>0) {
+ for (i=0,count=0; (i<vdi_set->size) && (count<maxnames); i++) {
+ vdi = vdi_set->contents[i];
+ if (xen_vdi_get_name_label(session, &usenames, vdi)) {
+ names[count++] = usenames;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_vdi_set_free(vdi_set);
+ xen_sr_set_free(sr_set);
+ while(--count) VIR_FREE(names[count]);
+ return -1;
+ }
+ }
+ xen_vdi_set_free(vdi_set);
+ } else {
+ if (vdi_set) {
+ xen_vdi_set_free(vdi_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Volume not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return -1;
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+}
+
+/*
+*XenapiStoragePoolIsActive
+*
+*Determine if the storage pool is currently running
+*
+*/
+static int
+xenapiStoragePoolIsActive(virStoragePoolPtr pool)
+{
+ xen_sr sr=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ virConnectPtr conn=pool->conn;
+ char uuid[VIR_UUID_STRING_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuid);
+
+ if (xen_sr_get_by_uuid(session, &sr, uuid)) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size>0) {
+ bool currently_attached;
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ xen_pbd_set_free(pbd_set);
+ xen_sr_free(sr);
+ if (currently_attached == 1)
+ return 1; /* running */
+ else
+ return 0; /* not running */
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Device not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_sr_free(sr);
+ return -1;
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return -1;
+}
+
+/*
+*XenapiStoragePoolLookupByUUID
+*
+*Lookup the storage pool by UUID
+*
+*/
+static virStoragePoolPtr
+xenapiStoragePoolLookupByUUID (virConnectPtr conn,
+ const unsigned char * uuid)
+{
+ xen_sr sr = NULL;
+ xen_sr_record *record = NULL;
+ char uuidStr[VIR_UUID_STRING_BUFLEN];
+ virStoragePoolPtr pool = NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(uuid,uuidStr);
+ if (xen_sr_get_by_uuid(session, &sr, uuidStr) && sr) {
+ if (xen_sr_get_record(session, &record, sr)) {
+ pool = virGetStoragePool(conn, record->name_label, uuid);
+ if (!pool) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get storage pool pointer");
+ xen_sr_record_free(record);
+ return pool;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_free(sr);
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, NULL);
+ }
+ return pool;
+}
+
+
+/*
+*XenapiStoragePoolGetInfo
+*
+*Get information regarding the given storage pool
+*
+*/
+static int
+xenapiStoragePoolGetInfo (virStoragePoolPtr pool,
+ virStoragePoolInfoPtr info)
+{
+ xen_sr_record *record=NULL;
+ xen_sr sr=NULL;
+ virConnectPtr conn = pool->conn;
+ int state = -1;
+ char uuid[VIR_UUID_STRING_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuid);
+ if (xen_sr_get_by_uuid(session, &sr, uuid) && sr) {
+ if (xen_sr_get_record(session, &record, sr)) {
+ info->capacity = record->physical_size;
+ info->allocation = record->virtual_allocation;
+ info->available = record->physical_size - record->physical_utilisation;
+ state = xenapiStoragePoolIsActive(pool);
+ if(state == 1) info->state = VIR_STORAGE_POOL_RUNNING;
+ else if(state == 0) info->state = VIR_STORAGE_POOL_INACTIVE;
+ xen_sr_record_free(record);
+ return 0;
+ } else {
+ xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, NULL);
+ return -1;
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+}
+
+/*
+ * xenapiStoragePoolLookupByVolume
+ *
+ * Look up the storage pool (XenAPI SR) for the given volume.  The SR is
+ * resolved by name label from vol->pool; the lookup is rejected unless
+ * exactly one SR carries that label.
+ *
+ * Returns a new virStoragePoolPtr on success, or NULL on error.
+ */
+static virStoragePoolPtr
+xenapiStoragePoolLookupByVolume (virStorageVolPtr vol)
+{
+    xen_sr_record *record=NULL;
+    xen_sr_set *sr_set=NULL;
+    xen_sr sr=NULL;
+    virStoragePoolPtr poolPtr=NULL;
+    virConnectPtr conn = vol->conn;
+    unsigned char raw_uuid[VIR_UUID_BUFLEN];
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (xen_sr_get_by_name_label(session, &sr_set, vol->pool) && sr_set->size>0) {
+        /* SR name labels are not guaranteed unique; refuse ambiguous matches */
+        if (sr_set->size!=1) {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+            xen_sr_set_free(sr_set);
+            return NULL;
+        }
+        /* sr is owned by sr_set; freeing sr_set below releases it too */
+        sr = sr_set->contents[0];
+        xen_sr_get_record(session, &record, sr);
+        if (record!=NULL) {
+            virUUIDParse(record->uuid,raw_uuid);
+            poolPtr = virGetStoragePool(conn,(const char *)record->name_label, raw_uuid);
+            if (poolPtr != NULL) {
+                xen_sr_record_free(record);
+                xen_sr_set_free(sr_set);
+                return poolPtr;
+            } else {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool pointer unavailable");
+            }
+            xen_sr_record_free(record);
+        } else {
+            /* get_record failed: report whatever error the session holds */
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        xen_sr_set_free(sr_set);
+    } else {
+        /* a non-NULL but empty set means the call succeeded with no match */
+        if (sr_set) {
+            xen_sr_set_free(sr_set);
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+    }
+    return NULL;
+}
+
+/*
+ * xenapiStorageVolLookupByName
+ *
+ * Look up a storage volume (XenAPI VDI) by its name label.
+ *
+ * NOTE(review): the lookup is by global VDI name label; @pool is only
+ * used as the pool name of the returned object — the VDI is not checked
+ * to actually belong to @pool.  Confirm this is intended.
+ *
+ * Returns a new virStorageVolPtr on success, or NULL on error.
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByName (virStoragePoolPtr pool,
+                              const char *name)
+{
+    xen_vdi_set *vdi_set=NULL;
+    xen_vdi vdi=NULL;
+    virStorageVolPtr volPtr=NULL;
+    virConnectPtr conn = pool->conn;
+    char *uuid=NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (xen_vdi_get_by_name_label(session, &vdi_set, (char *)name) && vdi_set->size>0) {
+        /* VDI name labels are not guaranteed unique; refuse ambiguous matches */
+        if (vdi_set->size!=1) {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume name is not unique");
+            xen_vdi_set_free(vdi_set);
+            return NULL;
+        }
+        /* vdi is owned by vdi_set; freeing the set below releases it */
+        vdi = vdi_set->contents[0];
+        if (xen_vdi_get_uuid(session, &uuid, vdi)) {
+            /* the VDI uuid is used as the volume's unique key */
+            volPtr = virGetStorageVol(conn, pool->name, name, uuid);
+            if (!volPtr) xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume pointer not available");
+            VIR_FREE(uuid);
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't find the Unique key of the Storage Volume specified");
+        }
+        xen_vdi_set_free(vdi_set);
+    } else {
+        /* a non-NULL but empty set means the call succeeded with no match */
+        if (vdi_set) {
+            xen_vdi_set_free(vdi_set);
+            xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_VOL, "Storage Volume not found");
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+    }
+    return volPtr;
+}
+
+/*
+ * xenapiStorageVolGetInfo
+ *
+ * Fill @info with details of the given storage volume (XenAPI VDI):
+ * capacity (virtual size), allocation (physical utilisation) and the
+ * volume type, which is derived from the type of the SR holding the VDI.
+ *
+ * Returns 0 on success, -1 on error.
+ */
+static int
+xenapiStorageVolGetInfo (virStorageVolPtr vol,
+                         virStorageVolInfoPtr info)
+{
+    virConnectPtr conn = vol->conn;
+    xen_vdi vdi=NULL;
+    xen_vdi_record *record=NULL;
+    xen_sr sr=NULL;
+    xen_sr_record *sr_record=NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+    int ret=-1;
+
+    if (xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+        if (xen_vdi_get_record(session, &record, vdi)) {
+            info->capacity = record->virtual_size;
+            info->allocation = record->physical_utilisation;
+            if (xen_vdi_get_sr(session, &sr, vdi)) {
+                if (xen_sr_get_record(session, &sr_record, sr)) {
+                    info->type = getStorageVolumeType(sr_record->type);
+                    xen_sr_record_free(sr_record);
+                    ret=0;
+                } else {
+                    xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+                }
+                xen_sr_free(sr);   /* was leaked on the success path */
+            } else {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+            xen_vdi_record_free(record);
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        xen_vdi_free(vdi);         /* was leaked on all but one error path */
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return ret;
+}
+
+/* A XenAPI SR always has a server-side definition, so every pool this
+ * driver exposes is reported as persistent. */
+static int
+xenapiStoragePoolIsPersistent (virStoragePoolPtr pool ATTRIBUTE_UNUSED)
+{
+    return 1; /* Storage Pool is always persistent */
+}
+
+
+/*
+ * xenapiStorageVolGetXMLDesc
+ *
+ * Build an XML description of the given storage volume.  The containing
+ * pool's XML is fetched and re-parsed because virStorageVolDefFormat()
+ * needs the pool definition alongside the volume definition.
+ *
+ * Returns a newly allocated XML string, or NULL on error.
+ */
+static char *
+xenapiStorageVolGetXMLDesc (virStorageVolPtr vol, unsigned int flags ATTRIBUTE_UNUSED)
+{
+    virBuffer buf = VIR_BUFFER_INITIALIZER;
+    virConnectPtr conn = vol->conn;
+    xen_vdi vdi=NULL;
+    xen_sr sr=NULL;
+    xen_vdi_record *record=NULL;
+    char *sr_uuid =NULL, *srname=NULL, *xml=NULL, *poolXml=NULL;
+    unsigned char raw_uuid[VIR_UUID_BUFLEN];
+    virStorageVolDefPtr vdef=NULL;
+    virStoragePoolDefPtr pdef=NULL;
+    virStoragePoolPtr pool=NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (!xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        return NULL;
+    }
+    if (!xen_vdi_get_record(session, &record, vdi)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Volume information");
+        xen_vdi_free(vdi);
+        return NULL;
+    }
+    if (VIR_ALLOC(vdef)<0) {
+        virReportOOMError();
+        goto cleanup;
+    }
+    if (!(vdef->name = strdup(record->name_label))) {
+        virReportOOMError();
+        goto cleanup;
+    }
+    if (!(vdef->key = strdup(record->uuid))) {
+        virReportOOMError();
+        goto cleanup;
+    }
+    /* capacity is the virtual size and allocation the physical
+     * utilisation, matching xenapiStorageVolGetInfo (these two were
+     * previously assigned the wrong way round) */
+    vdef->capacity = record->virtual_size;
+    vdef->allocation = record->physical_utilisation;
+
+    if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_uuid(session, &sr_uuid, sr)) {
+        virBufferVSprintf(&buf, "/%s/%s", sr_uuid, record->uuid);
+        vdef->target.path = virBufferContentAndReset(&buf);
+    }
+    if (sr_uuid == NULL) {
+        /* previously fell through and passed NULL to virUUIDParse() */
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Storage Pool UUID");
+        if (sr) xen_sr_free(sr);
+        goto cleanup;
+    }
+    xen_sr_get_name_label(session, &srname, sr);
+    xen_sr_free(sr);
+
+    virUUIDParse(sr_uuid, raw_uuid);
+    if (!(pool = virGetStoragePool(conn, srname, raw_uuid))) {
+        /* message fixed: previously read "Could get ..." */
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get storage pool pointer");
+        goto cleanup;
+    }
+    if (!(poolXml = xenapiStoragePoolGetXMLDesc(pool, 0))) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Storage Pool XML");
+        goto cleanup;
+    }
+    if (!(pdef = virStoragePoolDefParseString(poolXml))) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't parse Storage Pool XML");
+        goto cleanup;
+    }
+    if (!(xml = virStorageVolDefFormat(pdef, vdef)))
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't convert Storage Volume info to XML");
+
+ cleanup:
+    /* single exit path: the original leaked vdi everywhere and leaked the
+     * pool object reference on success */
+    virBufferFreeAndReset(&buf);
+    VIR_FREE(poolXml);
+    VIR_FREE(srname);
+    VIR_FREE(sr_uuid);
+    if (pool) virStoragePoolFree(pool);
+    virStoragePoolDefFree(pdef);
+    virStorageVolDefFree(vdef);
+    xen_vdi_record_free(record);
+    xen_vdi_free(vdi);
+    return xml;
+}
+
+/*
+ * xenapiStorageVolLookupByPath
+ *
+ * Look up a storage volume from a driver path of the form
+ * "/<sr uuid>/<vdi uuid>" (the format produced by
+ * xenapiStorageVolGetPath).
+ *
+ * Returns a new virStorageVolPtr on success, or NULL on error.
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByPath (virConnectPtr conn,
+                              const char *path)
+{
+    xen_sr sr=NULL;
+    xen_vdi vdi=NULL;
+    virStorageVolPtr volPtr=NULL;
+    char *srname=NULL,*vname=NULL;
+    char sruuid[VIR_UUID_STRING_BUFLEN]="\0", vuuid[VIR_UUID_STRING_BUFLEN]="\0";
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    /* path was wrongly marked ATTRIBUTE_UNUSED although it is parsed here;
+     * the %36 widths bound the conversions to the uuid buffers */
+    if (sscanf(path,"/%36[^/]/%36[^/]",sruuid,vuuid) != 2) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Invalid path");
+        return NULL;
+    }
+    if (xen_sr_get_by_uuid(session, &sr, sruuid) && xen_sr_get_name_label(session, &srname, sr)) {
+        if (xen_vdi_get_by_uuid(session, &vdi, vuuid) && xen_vdi_get_name_label(session, &vname, vdi)) {
+            if (!(volPtr = virGetStorageVol(conn, srname, vname, vuuid)))
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume pointer not available");
+            VIR_FREE(vname);
+            xen_vdi_free(vdi);
+        } else {
+            if (vdi) xen_vdi_free(vdi);
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        VIR_FREE(srname);
+        xen_sr_free(sr);
+    } else {
+        if (sr) xen_sr_free(sr);
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return volPtr;
+}
+
+/*
+ * xenapiStorageVolGetPath
+ *
+ * Build this driver's path for a volume: "/<sr uuid>/<vdi uuid>".
+ *
+ * Returns a newly allocated path string, or NULL on error (the buffer is
+ * never filled on the error paths, so virBufferContentAndReset() yields
+ * NULL there).
+ */
+static char *
+xenapiStorageVolGetPath (virStorageVolPtr vol)
+{
+    xen_vdi vdi=NULL;
+    virConnectPtr conn = vol->conn;
+    virBuffer path = VIR_BUFFER_INITIALIZER;
+    xen_sr sr=NULL;
+    char *sruuid=NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+        if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_uuid(session, &sruuid, sr)) {
+            /* vol->key is the VDI uuid, so it forms the path's second leg */
+            virBufferVSprintf(&path,"/%s/%s",sruuid,vol->key);
+            VIR_FREE(sruuid);
+            xen_sr_free(sr);
+        } else {
+            if (sr) xen_sr_free(sr);
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        xen_vdi_free(vdi);
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return virBufferContentAndReset(&path);
+}
+
+/* Pool refresh is a no-op for this driver.  NOTE(review): presumably
+ * because pool/volume state is queried live from the XenAPI server on
+ * each call rather than cached — confirm. */
+static int
+xenapiStoragePoolRefresh ( ATTRIBUTE_UNUSED virStoragePoolPtr pool,
+                           ATTRIBUTE_UNUSED unsigned int flags)
+{
+    return 0;
+}
+
+/*
+ * xenapiStorageVolLookupByKey
+ *
+ * Look up a storage volume by its unique key (the VDI uuid).  The SR
+ * record is fetched to supply the pool name of the returned object.
+ *
+ * Returns a new virStorageVolPtr on success, or NULL on error.
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByKey (virConnectPtr conn, const char * key)
+{
+    xen_vdi vdi=NULL;
+    xen_sr sr=NULL;
+    xen_vdi_record *vrecord=NULL;
+    xen_sr_record *srecord=NULL;
+    virStorageVolPtr volPtr=NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (xen_vdi_get_by_uuid(session, &vdi, (char *)key) && xen_vdi_get_record(session, &vrecord, vdi)) {
+        if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_record(session, &srecord, sr)) {
+            volPtr = virGetStorageVol(conn, srecord->name_label, vrecord->name_label, key);
+            if (!volPtr)
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume Pointer not available");
+            xen_sr_record_free(srecord);
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        if (sr) xen_sr_free(sr);   /* was leaked when get_record succeeded */
+        xen_vdi_record_free(vrecord);
+        xen_vdi_free(vdi);         /* was leaked on the success path */
+    } else {
+        if (vdi) xen_vdi_free(vdi);
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return volPtr;
+}
+
+
+/*
+ * xenapiStoragePoolDestroy
+ *
+ * "Destroy" (deactivate) a pool by unplugging every PBD attached to the
+ * SR.  Returns 0 if at least one PBD was unplugged successfully (an
+ * error is reported for each PBD that fails), -1 otherwise.
+ */
+static int
+xenapiStoragePoolDestroy (virStoragePoolPtr pool)
+{
+    xen_sr sr=NULL;
+    xen_pbd pbd=NULL;
+    char uuidStr[VIR_UUID_STRING_BUFLEN];
+    struct xen_pbd_set *pbd_set=NULL;
+    int i,ret=-1;
+    virConnectPtr conn = pool->conn;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    virUUIDFormat(pool->uuid,uuidStr);
+    if (xen_sr_get_by_uuid(session, &sr, uuidStr)) {
+        if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size>0) {
+            for (i=0;i<pbd_set->size;i++) {
+                /* fixed: previously indexed contents[0] every iteration,
+                 * unplugging the same PBD repeatedly */
+                pbd = pbd_set->contents[i];
+                if (xen_pbd_unplug(session, pbd))
+                    ret=0;
+                else
+                    xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+            xen_pbd_set_free(pbd_set);
+        } else {
+            if (pbd_set) {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "There are no PBDs in the specified pool to unplug");
+                xen_pbd_set_free(pbd_set);
+            } else {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+        }
+        xen_sr_free(sr);
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return ret;
+}
+
+/* Driver method table.  Entries are positional and must stay in the slot
+ * order declared by struct _virStorageDriver; the slot names below are
+ * inferred from the function names — NOTE(review): verify against the
+ * struct declaration for the targeted libvirt version.  NULL slots are
+ * unimplemented APIs. */
+static virStorageDriver xenapiStorageDriver = {
+    "XenAPI Storage",                 /* name */
+    xenapiStorageOpen,                /* open */
+    xenapiStorageClose,               /* close */
+    xenapiNumOfStoragePools,          /* numOfPools */
+    xenapiListStoragePools,           /* listPools */
+    xenapiNumOfDefinedStoragePools,   /* numOfDefinedPools */
+    xenapiListDefinedStoragePools,    /* listDefinedPools */
+    NULL,                             /* findPoolSources */
+    xenapiStoragePoolLookupByName,    /* poolLookupByName */
+    xenapiStoragePoolLookupByUUID,    /* poolLookupByUUID */
+    xenapiStoragePoolLookupByVolume,  /* poolLookupByVolume */
+    xenapiStoragePoolCreateXML,       /* poolCreateXML */
+    NULL,                             /* poolDefineXML */
+    xenapiStoragePoolBuild,           /* poolBuild */
+    NULL,                             /* poolUndefine */
+    xenapiStoragePoolCreate,          /* poolCreate */
+    xenapiStoragePoolDestroy,         /* poolDestroy */
+    NULL,                             /* poolDelete */
+    xenapiStoragePoolRefresh,         /* poolRefresh */
+    xenapiStoragePoolGetInfo,         /* poolGetInfo */
+    xenapiStoragePoolGetXMLDesc,      /* poolGetXMLDesc */
+    xenapiStoragePoolGetAutostart,    /* poolGetAutostart */
+    xenapiStoragePoolSetAutostart,    /* poolSetAutostart */
+    xenapiStoragePoolNumOfVolumes,    /* poolNumOfVolumes */
+    xenapiStoragePoolListVolumes,     /* poolListVolumes */
+    xenapiStorageVolLookupByName,     /* volLookupByName */
+    xenapiStorageVolLookupByKey,      /* volLookupByKey */
+    xenapiStorageVolLookupByPath,     /* volLookupByPath */
+    NULL,                             /* volCreateXML */
+    NULL,                             /* volCreateXMLFrom */
+    NULL,                             /* volDelete */
+    xenapiStorageVolGetInfo,          /* volGetInfo */
+    xenapiStorageVolGetXMLDesc,       /* volGetXMLDesc */
+    xenapiStorageVolGetPath,          /* volGetPath */
+    xenapiStoragePoolIsActive,        /* poolIsActive */
+    xenapiStoragePoolIsPersistent     /* poolIsPersistent */
+};
+
+
+/*
+ * xenapiStorageRegister
+ *
+ * Register the XenAPI storage driver table with libvirt; called once at
+ * library initialisation.  Returns the result of
+ * virRegisterStorageDriver().
+ */
+int
+xenapiStorageRegister (void)
+{
+    return virRegisterStorageDriver(&xenapiStorageDriver);
+}
+
+
+
+
--- ./libvirt_org/src/xenapi/xenapi_storage_driver.h 1970-01-01 01:00:00.000000000 +0100
+++ ./libvirt/src/xenapi/xenapi_storage_driver.h 2010-03-11 12:46:00.000000000 +0000
@@ -0,0 +1,42 @@
+/*
+ * xenapi_storage_driver.h: Xen API Storage Driver header file
+ * Copyright (C) 2009, 2010 Citrix Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Sharadha Prabhakar <sharadha.prabhakar(a)citrix.com>
+ */
+
+
+#ifndef __VIR_XENAPI_STORAGE_H__
+#define __VIR_XENAPI_STORAGE_H__
+
+#include <xen/api/xen_common.h>
+#include <libxml/tree.h>
+
+
+
+/* XenAPI storage driver's private data structure */
+struct _xenapiStoragePrivate {
+ xen_session *session;
+ char *url;
+ int noVerify;
+ virCapsPtr caps;
+};
+
+
+
+
+#endif /* __VIR_XENAPI_STORAGE_H__ */
--- ./src/xenapi/xenapi_utils.h_orig 2010-03-24 15:38:59.000000000 +0000
+++ ./src/xenapi/xenapi_utils.h 2010-03-23 10:44:38.000000000 +0000
@@ -56,8 +56,12 @@
#include "buf.h"
#define NETWORK_DEVID_SIZE (12)
+#define STORAGE_DEVID_SIZE (12)
typedef uint64_t cpumap_t;
+//newly added
+int
+createVbdStorage (virConnectPtr conn, xen_vm vm, int device, char *path, int devtype);
void
xenSessionFree(xen_session *session);
--- ./src/xenapi/xenapi_utils.c_orig 2010-03-24 15:32:28.000000000 +0000
+++ ./src/xenapi/xenapi_utils.c 2010-03-24 15:09:41.000000000 +0000
@@ -53,6 +53,7 @@
#include "xenapi_utils.h"
#include "util/logging.h"
#include "qparams.h"
+#include "xenapi_storage_driver.h"
void
xenSessionFree(xen_session *session)
@@ -390,17 +391,96 @@
const char *buf, const char *filename, const char *func, size_t lineno)
{
struct _xenapiPrivate *priv = conn->privateData;
-
- if (buf == NULL && priv != NULL && priv->session != NULL) {
- char *ret = returnErrorFromSession(priv->session);
- virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
- xen_session_clear_error(priv->session);
- VIR_FREE(ret);
+ struct _xenapiStoragePrivate *privS = conn->storagePrivateData;
+ char *ret = NULL;
+ if (buf == NULL) {
+ if (priv != NULL && priv->session != NULL) {
+ if (!priv->session->ok) {
+ ret = returnErrorFromSession(priv->session);
+ virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
+ xen_session_clear_error(priv->session);
+ VIR_FREE(ret);
+ }
+ }
+ if (privS != NULL && privS->session !=NULL) {
+ if (!privS->session->ok) {
+ ret = returnErrorFromSession(privS->session);
+ virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
+ xen_session_clear_error(privS->session);
+ VIR_FREE(ret);
+ }
+ }
} else {
virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), buf);
}
}
+/* createVbdStorage:
+ * Create a VBD (virtual block device) attaching the storage volume named
+ * by @path ("/<sr uuid>/<vdi uuid>") to @vm.  @device becomes the VBD's
+ * userdevice number; @devtype selects disk vs cdrom.
+ * Returns 0 on success, -1 on error. */
+int
+createVbdStorage (virConnectPtr conn, xen_vm vm, int device, char *path, int devtype)
+{
+    xen_vm xvm=NULL;
+    xen_vdi vdi=NULL;
+    xen_vbd vbd=NULL;
+    char *vmuuid=NULL;
+    char userdevice[STORAGE_DEVID_SIZE]="\0";
+    xen_vbd_record *record=NULL;
+    xen_session *session = ((struct _xenapiPrivate *)(conn->privateData))->session;
+    char sruuid[VIR_UUID_STRING_BUFLEN]="\0", voluuid[VIR_UUID_STRING_BUFLEN]="\0";
+
+    /* %36 widths bound the conversions to the uuid buffers; the original
+     * unbounded %[^/] could overflow them.  Debug fprintf removed. */
+    if (sscanf(path,"/%36[^/]/%36[^/]",sruuid,voluuid)!=2)
+        return -1;
+    if (!xen_vm_get_uuid(session, &vmuuid, vm))
+        return -1;
+    if (!xen_vm_get_by_uuid(session, &xvm, vmuuid)){
+        VIR_FREE(vmuuid);
+        return -1;
+    }
+    VIR_FREE(vmuuid);
+    if (!xen_vdi_get_by_uuid(session, &vdi, voluuid)) {
+        xen_vm_free(xvm);
+        return -1;
+    }
+    snprintf(userdevice, sizeof(userdevice), "%d", device);
+
+    xen_vm_record_opt *vm_opt = xen_vm_record_opt_alloc();
+    vm_opt->is_record = 0;
+    vm_opt->u.handle = xvm;
+
+    xen_vdi_record_opt *vdi_opt = xen_vdi_record_opt_alloc();
+    vdi_opt->is_record = 0;
+    vdi_opt->u.handle = vdi;
+
+    record = xen_vbd_record_alloc();
+    /* record now owns xvm and vdi via the opt wrappers; freeing the
+     * record releases them */
+    record->vm = vm_opt;
+    record->vdi = vdi_opt;
+    if (!(record->userdevice = strdup(userdevice))) {
+        xen_vbd_record_free(record);
+        return -1;
+    }
+    record->other_config = xen_string_string_map_alloc(0);
+    record->runtime_properties = xen_string_string_map_alloc(0);
+    record->qos_algorithm_params = xen_string_string_map_alloc(0);
+    if (devtype == VIR_DOMAIN_DISK_DEVICE_DISK)
+        record->type = XEN_VBD_TYPE_DISK;
+    else if (devtype == VIR_DOMAIN_DISK_DEVICE_CDROM)
+        record->type = XEN_VBD_TYPE_CD;
+    else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Only CDROM and HardDisk supported");
+        xen_vbd_record_free(record);
+        return -1;
+    }
+    if (!xen_vbd_create(session, &vbd, record)){
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        xen_vbd_record_free(record);
+        return -1;
+    }
+    xen_vbd_free(vbd);   /* handle is not needed further; was leaked */
+    xen_vbd_record_free(record);
+    return 0;
+}
+
+
+
/* creates network intereface for VM */
int
createVifNetwork (virConnectPtr conn, xen_vm vm, char *device,
@@ -557,6 +637,7 @@
int device_number=0;
char *bridge=NULL,*mac=NULL;
int i;
+ //support for network interfaces
for (i=0;i<def->nnets;i++) {
if (def->nets[i]->type == VIR_DOMAIN_NET_TYPE_BRIDGE) {
if (def->nets[i]->data.bridge.brname)
@@ -580,6 +661,13 @@
if (bridge) VIR_FREE(bridge);
}
}
+ //support for disks here
+ for (i=0;i<def->ndisks;i++) {
+ if (createVbdStorage(conn, *vm, i, def->disks[i]->src, def->disks[i]->device)!= 0) {
+ xen_vm_record_free(*record);
+ return -1;
+ }
+ }
return 0;
error_cleanup:
--- ../libvirt_org/src/conf/storage_conf.c 2010-02-17 17:38:05.000000000 +0000
+++ ./src/conf/storage_conf.c 2010-03-22 15:08:36.000000000 +0000
@@ -61,7 +61,7 @@
VIR_ENUM_IMPL(virStoragePoolFormatFileSystemNet,
VIR_STORAGE_POOL_NETFS_LAST,
- "auto", "nfs", "glusterfs")
+ "auto", "nfs", "nfs-iso", "cifs-iso", "glusterfs")
VIR_ENUM_IMPL(virStoragePoolFormatDisk,
VIR_STORAGE_POOL_DISK_LAST,
--- ../libvirt_org/src/conf/storage_conf.h 2010-02-17 17:38:06.000000000 +0000
+++ ./src/conf/storage_conf.h 2010-03-22 14:01:02.000000000 +0000
@@ -404,6 +404,8 @@
enum virStoragePoolFormatFileSystemNet {
VIR_STORAGE_POOL_NETFS_AUTO = 0,
VIR_STORAGE_POOL_NETFS_NFS,
+ VIR_STORAGE_POOL_NETFS_NFS_ISO,
+ VIR_STORAGE_POOL_NETFS_CIFS_ISO,
VIR_STORAGE_POOL_NETFS_GLUSTERFS,
VIR_STORAGE_POOL_NETFS_LAST,
};
--- ./src/Makefile.am_04mar 2010-03-05 10:55:04.000000000 +0000
+++ ./src/Makefile.am 2010-03-23 18:11:50.000000000 +0000
@@ -210,7 +211,9 @@
XENAPI_DRIVER_SOURCES = \
xenapi/xenapi_driver.c xenapi/xenapi_driver.h \
xenapi_driver_private.h \
- xenapi/xenapi_utils.c xenapi/xenapi_utils.h
+ xenapi/xenapi_utils.c xenapi/xenapi_utils.h \
+ xenapi/xenapi_storage_driver.c \
+ xenapi/xenapi_storage_driver.h
UML_DRIVER_SOURCES = \
uml/uml_conf.c uml/uml_conf.h \
--- ../libvirt_org/src/libvirt.c 2010-02-17 17:38:08.000000000 +0000
+++ ./src/libvirt.c 2010-03-11 12:14:33.000000000 +0000
@@ -377,6 +381,10 @@
#ifdef WITH_ESX
if (esxRegister() == -1) return -1;
#endif
#ifdef WITH_XENAPI
if (xenapiRegister () == -1) return -1;
+ if (xenapiStorageRegister () == -1) return -1;
#endif
#ifdef WITH_REMOTE
if (remoteRegister () == -1) return -1;
#endif
--- ./src/xenapi/xenapi_driver.h_orig 2010-03-23 19:00:14.000000000 +0000
+++ ./src/xenapi/xenapi_driver.h 2010-03-11 11:11:01.000000000 +0000
@@ -25,5 +25,6 @@
extern int xenapiRegister (void);
+extern int xenapiStorageRegister (void);
#endif /* __VIR_XENAPI_PRIV_H__ */
15 years
[libvirt] using disk devices with mixed bus types (ide and virtio)
by Ingo Tuchscherer
Hello,
While using multiple disk devices with different bus types (ide and virtio)
I noticed that the order of disks is handled in a special way.
The disk device which was defined as the first item will not be the first
entry in the system xml file.
e.g. if I define a virtio device (which is supposed to be the boot device)
this would be added after all the ide devices.
So it's not possible to boot from a virtio device even if I have defined
other ide devices since qemu flags the first device as boot device.
Is there a special reason why the disks are sorted by type (first all ide
and then virtio)? That prevents booting from virtio devices.
Thanks.
Mit freundlichen Grüßen / Kind regards
Ingo Tuchscherer
15 years
[libvirt] [PATCH] tests: test-lib.sh portability and clean-up
by Jim Meyering
No big deal, but I saw recent additions of "test ... -a ..."
(not portable) so fixed the rest, too.
Now, searching for violations shows none:
git grep '\<test .* -a '
Whether it's possible to rely on test -a in test scripts is debatable:
perhaps you've ensured that the SHELL you use when running tests is
POSIX compliant or better (I do that in coreutils), but at least in
configure.ac, we should toe the line wrt portability (because *it*
has less choice), so those are in a separate commit.
Since this is a global change, it deserves a syntax-check rule.
That's the 3/3 patch, below.
1/3 fixes test-lib.sh
2/3 fixes configure.ac
>From ca7db6cb8000cc283fcee7899140d2fc892b0296 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Wed, 24 Mar 2010 09:05:27 +0100
Subject: [PATCH 1/3] tests: shell script portability and clean-up
* tests/test-lib.sh: "echo -n" is not portable. Use printf instead.
Remove unnecessary uses of "eval-in-subshell" (subshell is sufficient).
Remove uses of tests' -a operator; it is not portable.
Instead, use "test cond && test cond2".
* tests/schematestutils.sh: Replace use of test's -a.
---
tests/schematestutils.sh | 2 +-
tests/test-lib.sh | 20 ++++++++++----------
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/tests/schematestutils.sh b/tests/schematestutils.sh
index 301b9eb..f172857 100644
--- a/tests/schematestutils.sh
+++ b/tests/schematestutils.sh
@@ -21,7 +21,7 @@ do
ret=$?
test_result $n $(basename $(dirname $xml))"/"$(basename $xml) $ret
- if test "$verbose" = "1" -a $ret != 0 ; then
+ if test "$verbose" = "1" && test $ret != 0 ; then
echo -e "$cmd\n$result"
fi
if test "$ret" != 0 ; then
diff --git a/tests/test-lib.sh b/tests/test-lib.sh
index 57fd438..28b830e 100644
--- a/tests/test-lib.sh
+++ b/tests/test-lib.sh
@@ -19,7 +19,7 @@ test_intro()
name=$1
if test "$verbose" = "0" ; then
echo "TEST: $name"
- echo -n " "
+ printf " "
fi
}
@@ -29,15 +29,15 @@ test_result()
name=$2
status=$3
if test "$verbose" = "0" ; then
- mod=`eval "expr \( $counter - 1 \) % 40"`
- if test "$counter" != 1 -a "$mod" = 0 ; then
- printf " %-3d\n" `eval "expr $counter - 1"`
- echo -n " "
+ mod=`expr \( $counter + 40 - 1 \) % 40`
+ if test "$counter" != 1 && test "$mod" = 0 ; then
+ printf " %-3d\n" `expr $counter - 1`
+ printf " "
fi
if test "$status" = "0" ; then
- echo -n "."
+ printf "."
else
- echo -n "!"
+ printf "!"
fi
else
if test "$status" = "0" ; then
@@ -54,11 +54,11 @@ test_final()
status=$2
if test "$verbose" = "0" ; then
- mod=`eval "expr \( $counter + 1 \) % 40"`
- if test "$mod" != "0" -a "$mod" != "1" ; then
+ mod=`expr \( $counter + 1 \) % 40`
+ if test "$mod" != "0" && test "$mod" != "1" ; then
for i in `seq $mod 40`
do
- echo -n " "
+ printf " "
done
fi
if test "$status" = "0" ; then
--
1.7.0.3.435.g097f4
>From 7998714d60b997357bfea15d6f2d0f729fc8fb29 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Wed, 24 Mar 2010 09:10:13 +0100
Subject: [PATCH 2/3] build: don't use "test cond1 -a cond2" in configure: it's not portable
* configure.ac: Use "test cond1 && test cond2" instead.
---
configure.ac | 26 +++++++++++++-------------
1 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/configure.ac b/configure.ac
index bcf1d5a..2e6d2e4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -197,10 +197,10 @@ dnl if --prefix is /usr, don't use /usr/var for localstatedir
dnl or /usr/etc for sysconfdir
dnl as this makes a lot of things break in testing situations
-if test "$prefix" = "/usr" -a "$localstatedir" = '${prefix}/var' ; then
+if test "$prefix" = "/usr" && test "$localstatedir" = '${prefix}/var' ; then
localstatedir='/var'
fi
-if test "$prefix" = "/usr" -a "$sysconfdir" = '${prefix}/etc' ; then
+if test "$prefix" = "/usr" && test "$sysconfdir" = '${prefix}/etc' ; then
sysconfdir='/etc'
fi
@@ -240,7 +240,7 @@ AC_ARG_WITH([libvirtd],
dnl
dnl specific tests to setup DV devel environments with debug etc ...
dnl
-if [[ "${LOGNAME}" = "veillard" -a "`pwd`" = "/u/veillard/libvirt" ]] ; then
+if [[ "${LOGNAME}" = "veillard" && test "`pwd`" = "/u/veillard/libvirt" ]] ; then
STATIC_BINARIES="-static"
else
STATIC_BINARIES=
@@ -351,7 +351,7 @@ LIBXENSERVER_LIBS=""
LIBXENSERVER_CFLAGS=""
dnl search for the XenServer library
if test "$with_xenapi" != "no" ; then
- if test "$with_xenapi" != "yes" -a "$with_xenapi" != "check" ; then
+ if test "$with_xenapi" != "yes" && test "$with_xenapi" != "check" ; then
LIBXENSERVER_CFLAGS="-I$with_xenapi/include"
LIBXENSERVER_LIBS="-L$with_xenapi"
fi
@@ -390,7 +390,7 @@ XEN_LIBS=""
XEN_CFLAGS=""
dnl search for the Xen store library
if test "$with_xen" != "no" ; then
- if test "$with_xen" != "yes" -a "$with_xen" != "check" ; then
+ if test "$with_xen" != "yes" && test "$with_xen" != "check" ; then
XEN_CFLAGS="-I$with_xen/include"
XEN_LIBS="-L$with_xen/lib64 -L$with_xen/lib"
fi
@@ -571,7 +571,7 @@ AC_ARG_WITH([libxml], AC_HELP_STRING([--with-libxml=@<:@PFX@:>@], [libxml2 locat
if test "x$with_libxml" = "xno" ; then
AC_MSG_CHECKING(for libxml2 libraries >= $LIBXML_REQUIRED)
AC_MSG_ERROR([libxml2 >= $LIBXML_REQUIRED is required for libvirt])
-elif test "x$with_libxml" = "x" -a "x$PKG_CONFIG" != "x" ; then
+elif test "x$with_libxml" = "x" && test "x$PKG_CONFIG" != "x" ; then
PKG_CHECK_MODULES(LIBXML, libxml-2.0 >= $LIBXML_REQUIRED, [LIBXML_FOUND=yes], [LIBXML_FOUND=no])
fi
if test "$LIBXML_FOUND" = "no" ; then
@@ -661,7 +661,7 @@ AC_ARG_WITH([sasl],
SASL_CFLAGS=
SASL_LIBS=
if test "x$with_sasl" != "xno"; then
- if test "x$with_sasl" != "xyes" -a "x$with_sasl" != "xcheck"; then
+ if test "x$with_sasl" != "xyes" && test "x$with_sasl" != "xcheck"; then
SASL_CFLAGS="-I$with_sasl"
SASL_LIBS="-L$with_sasl"
fi
@@ -716,7 +716,7 @@ AC_ARG_WITH([yajl],
YAJL_CFLAGS=
YAJL_LIBS=
if test "x$with_yajl" != "xno"; then
- if test "x$with_yajl" != "xyes" -a "x$with_yajl" != "xcheck"; then
+ if test "x$with_yajl" != "xyes" && test "x$with_yajl" != "xcheck"; then
YAJL_CFLAGS="-I$with_yajl/include"
YAJL_LIBS="-L$with_yajl/lib"
fi
@@ -1004,7 +1004,7 @@ AC_ARG_WITH([numactl],
NUMACTL_CFLAGS=
NUMACTL_LIBS=
-if test "$with_qemu" = "yes" -a "$with_numactl" != "no"; then
+if test "$with_qemu" = "yes" && test "$with_numactl" != "no"; then
old_cflags="$CFLAGS"
old_libs="$LIBS"
if test "$with_numactl" = "check"; then
@@ -1062,7 +1062,7 @@ dnl
dnl libssh checks
dnl
-if test "$with_libssh2" != "yes" -a "$with_libssh2" != "no"; then
+if test "$with_libssh2" != "yes" && test "$with_libssh2" != "no"; then
libssh2_path="$with_libssh2"
elif test "$with_libssh2" = "yes"; then
libssh2_path="/usr/local/lib/"
@@ -1143,7 +1143,7 @@ dnl introduced in 0.4.0 release which need as minimum
dnl
CAPNG_CFLAGS=
CAPNG_LIBS=
-if test "$with_qemu" = "yes" -a "$with_capng" != "no"; then
+if test "$with_qemu" = "yes" && test "$with_capng" != "no"; then
old_cflags="$CFLAGS"
old_libs="$LIBS"
if test "$with_capng" = "check"; then
@@ -1453,7 +1453,7 @@ if test "$with_storage_disk" = "yes" -o "$with_storage_disk" = "check"; then
PARTED_FOUND=yes
fi
- if test "$with_storage_disk" != "no" -a "x$PKG_CONFIG" != "x" ; then
+ if test "$with_storage_disk" != "no" && test "x$PKG_CONFIG" != "x" ; then
PKG_CHECK_MODULES(LIBPARTED, libparted >= $PARTED_REQUIRED, [], [PARTED_FOUND=no])
fi
if test "$PARTED_FOUND" = "no"; then
@@ -1635,7 +1635,7 @@ else
fi
AC_MSG_RESULT($RUNNING_XEND)
-AM_CONDITIONAL([ENABLE_XEN_TESTS], [test "$RUNNING_XEN" != "no" -a "$RUNNING_XEND" != "no"])
+AM_CONDITIONAL([ENABLE_XEN_TESTS], [test "$RUNNING_XEN" != "no" && test "$RUNNING_XEND" != "no"])
AC_ARG_ENABLE([test-coverage],
AC_HELP_STRING([--enable-test-coverage], [turn on code coverage instrumentation @<:@default=no@:>@]),
--
1.7.0.3.435.g097f4
>From 95c8ddd2eca90e3024a6f74af84517c1e0115a60 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Wed, 24 Mar 2010 09:32:43 +0100
Subject: [PATCH 3/3] maint: add syntax-check rule to prohibit use of test's -a operator
* cfg.mk (sc_prohibit_test_minus_a): New rule.
---
cfg.mk | 6 ++++++
1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/cfg.mk b/cfg.mk
index 2d0d278..4302338 100644
--- a/cfg.mk
+++ b/cfg.mk
@@ -269,6 +269,12 @@ sc_preprocessor_indentation:
echo '$(ME): skipping test $@: cppi not installed' 1>&2; \
fi
+# Using test's -a operator is not portable.
+sc_prohibit_test_minus_a:
+ @re='\<test .+ -[a] ' \
+ msg='use "test C1 && test C2, not "test C1 -''a C2"' \
+ $(_prohibit_regexp)
+
sc_copyright_format:
@$(VC_LIST_EXCEPT) | xargs grep -ni 'copyright .*Red 'Hat \
| grep -v Inc \
--
1.7.0.3.435.g097f4
15 years
[libvirt] Supporting hypervisor specific APIs in libvirt
by Anthony Liguori
Hi,
I've mentioned this to a few folks already but I wanted to start a
proper thread.
We're struggling in qemu with usability and one area that concerns me is
the disparity in features that are supported by qemu vs what's
implemented in libvirt.
This isn't necessarily libvirt's problem if it's mission is to provide a
common hypervisor API that covers the most commonly used features.
However, for qemu, we need an API that covers all of our features that
people can develop against. The ultimate question we need to figure out
is, should we encourage our users to always use libvirt or should we
build our own API for people (and libvirt) to consume.
I don't think it's necessarily a big technical challenge for libvirt to
support qemu more completely. I think it amounts to introducing a
series of virQemuXXXX APIs that implement qemu specific functions. Over
time, qemu specific APIs can be deprecated in favour of more generic
virDomain APIs.
What's the feeling about this from the libvirt side of things? Is there
interest in support hypervisor specific interfaces should we be looking
to provide our own management interface for libvirt to consume?
Regards,
Anthony Liguori
15 years
[libvirt] [PATCH] esx: Make the conf parser compare names case insensitive in VMX mode
by Matthias Bolte
The keys of entries in a VMX file are case insensitive. Both scsi0:1.fileName
and scsi0:1.filename are valid. Therefore, make the conf parser compare names
case insensitive in VMX mode to accept every capitalization variation.
Also add test cases for this.
---
src/util/conf.c | 5 ++-
tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx | 51 ++++++++++++++++++++++
tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml | 25 +++++++++++
tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx | 51 ++++++++++++++++++++++
tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml | 25 +++++++++++
tests/vmx2xmltest.c | 9 ++++
6 files changed, 165 insertions(+), 1 deletions(-)
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml
diff --git a/src/util/conf.c b/src/util/conf.c
index 24588c2..ae0459e 100644
--- a/src/util/conf.c
+++ b/src/util/conf.c
@@ -831,7 +831,10 @@ virConfGetValue(virConfPtr conf, const char *setting)
cur = conf->entries;
while (cur != NULL) {
- if ((cur->name != NULL) && (STREQ(cur->name, setting)))
+ if ((cur->name != NULL) &&
+ ((conf->flags & VIR_CONF_FLAG_VMX_FORMAT &&
+ STRCASEEQ(cur->name, setting)) ||
+ STREQ(cur->name, setting)))
return(cur->value);
cur = cur->next;
}
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx
new file mode 100644
index 0000000..3626c5e
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx
@@ -0,0 +1,51 @@
+CONFIG.VERSION = "8"
+VIRTUALHW.VERSION = "4"
+FLOPPY0.PRESENT = "FALSE"
+NVRAM = "FEDORA11.NVRAM"
+DEPLOYMENTPLATFORM = "WINDOWS"
+VIRTUALHW.PRODUCTCOMPATIBILITY = "HOSTED"
+TOOLS.UPGRADE.POLICY = "USEGLOBAL"
+POWERTYPE.POWEROFF = "DEFAULT"
+POWERTYPE.POWERON = "DEFAULT"
+POWERTYPE.SUSPEND = "DEFAULT"
+POWERTYPE.RESET = "DEFAULT"
+
+DISPLAYNAME = "FEDORA11"
+EXTENDEDCONFIGFILE = "FEDORA11.VMXF"
+
+SCSI0.PRESENT = "TRUE"
+SCSI0.SHAREDBUS = "NONE"
+SCSI0.VIRTUALDEV = "LSILOGIC"
+MEMSIZE = "1024"
+SCSI0:0.PRESENT = "TRUE"
+SCSI0:0.FILENAME = "FEDORA11.vmdk"
+SCSI0:0.DEVICETYPE = "SCSI-HARDDISK"
+IDE0:0.PRESENT = "TRUE"
+IDE0:0.CLIENTDEVICE = "TRUE"
+IDE0:0.DEVICETYPE = "CDROM-RAW"
+IDE0:0.STARTCONNECTED = "FALSE"
+ETHERNET0.PRESENT = "TRUE"
+ETHERNET0.NETWORKNAME = "VM NETWORK"
+ETHERNET0.ADDRESSTYPE = "VPX"
+ETHERNET0.GENERATEDADDRESS = "00:50:56:91:48:C7"
+CHIPSET.ONLINESTANDBY = "FALSE"
+GUESTOSALTNAME = "RED HAT ENTERPRISE LINUX 5 (32-BIT)"
+GUESTOS = "RHEL5"
+UUID.BIOS = "50 11 5E 16 9B DC 49 D7-F1 71 53 C4 D7 F9 17 10"
+SNAPSHOT.ACTION = "KEEP"
+SCHED.CPU.MIN = "0"
+SCHED.CPU.UNITS = "MHZ"
+SCHED.CPU.SHARES = "NORMAL"
+SCHED.MEM.MINSIZE = "0"
+SCHED.MEM.SHARES = "NORMAL"
+TOOLSCRIPTS.AFTERPOWERON = "TRUE"
+TOOLSCRIPTS.AFTERRESUME = "TRUE"
+TOOLSCRIPTS.BEFORESUSPEND = "TRUE"
+TOOLSCRIPTS.BEFOREPOWEROFF = "TRUE"
+
+SCSI0:0.REDO = ""
+TOOLS.SYNCTIME = "FALSE"
+UUID.LOCATION = "56 4D B5 06 A2 BD FB EB-AE 86 F7 D8 49 27 D0 C4"
+SCHED.CPU.MAX = "UNLIMITED"
+SCHED.SWAP.DERIVEDNAME = "/VMFS/VOLUMES/498076B2-02796C1A-EF5B-000AE484A6A3/FEDORA11/FEDORA11-7DE040D8.VSWP"
+TOOLS.REMINDINSTALL = "TRUE"
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml
new file mode 100644
index 0000000..0be570f
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml
@@ -0,0 +1,25 @@
+<domain type='vmware'>
+ <name>FEDORA11</name>
+ <uuid>50115e16-9bdc-49d7-f171-53c4d7f91710</uuid>
+ <memory>1048576</memory>
+ <currentMemory>1048576</currentMemory>
+ <vcpu>1</vcpu>
+ <os>
+ <type arch='i686'>hvm</type>
+ </os>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='LSILOGIC'/>
+ <source file='[datastore] directory/FEDORA11.vmdk'/>
+ <target dev='sda' bus='scsi'/>
+ </disk>
+ <interface type='bridge'>
+ <mac address='00:50:56:91:48:c7'/>
+ <source bridge='VM NETWORK'/>
+ </interface>
+ </devices>
+</domain>
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx
new file mode 100644
index 0000000..a485d03
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx
@@ -0,0 +1,51 @@
+config.version = "8"
+virtualhw.version = "4"
+floppy0.present = "false"
+nvram = "fedora11.nvram"
+deploymentplatform = "windows"
+virtualhw.productcompatibility = "hosted"
+tools.upgrade.policy = "useglobal"
+powertype.poweroff = "default"
+powertype.poweron = "default"
+powertype.suspend = "default"
+powertype.reset = "default"
+
+displayname = "fedora11"
+extendedconfigfile = "fedora11.vmxf"
+
+scsi0.present = "true"
+scsi0.sharedbus = "none"
+scsi0.virtualdev = "lsilogic"
+memsize = "1024"
+scsi0:0.present = "true"
+scsi0:0.filename = "fedora11.vmdk"
+scsi0:0.devicetype = "scsi-harddisk"
+ide0:0.present = "true"
+ide0:0.clientdevice = "true"
+ide0:0.devicetype = "cdrom-raw"
+ide0:0.startconnected = "false"
+ethernet0.present = "true"
+ethernet0.networkname = "vm network"
+ethernet0.addresstype = "vpx"
+ethernet0.generatedaddress = "00:50:56:91:48:c7"
+chipset.onlinestandby = "false"
+guestosaltname = "red hat enterprise linux 5 (32-bit)"
+guestos = "rhel5"
+uuid.bios = "50 11 5e 16 9b dc 49 d7-f1 71 53 c4 d7 f9 17 10"
+snapshot.action = "keep"
+sched.cpu.min = "0"
+sched.cpu.units = "mhz"
+sched.cpu.shares = "normal"
+sched.mem.minsize = "0"
+sched.mem.shares = "normal"
+toolscripts.afterpoweron = "true"
+toolscripts.afterresume = "true"
+toolscripts.beforesuspend = "true"
+toolscripts.beforepoweroff = "true"
+
+scsi0:0.redo = ""
+tools.synctime = "false"
+uuid.location = "56 4d b5 06 a2 bd fb eb-ae 86 f7 d8 49 27 d0 c4"
+sched.cpu.max = "unlimited"
+sched.swap.derivedname = "/vmfs/volumes/498076b2-02796c1a-ef5b-000ae484a6a3/fedora11/fedora11-7de040d8.vswp"
+tools.remindinstall = "true"
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml
new file mode 100644
index 0000000..766172f
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml
@@ -0,0 +1,25 @@
+<domain type='vmware'>
+ <name>fedora11</name>
+ <uuid>50115e16-9bdc-49d7-f171-53c4d7f91710</uuid>
+ <memory>1048576</memory>
+ <currentMemory>1048576</currentMemory>
+ <vcpu>1</vcpu>
+ <os>
+ <type arch='i686'>hvm</type>
+ </os>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='lsilogic'/>
+ <source file='[datastore] directory/fedora11.vmdk'/>
+ <target dev='sda' bus='scsi'/>
+ </disk>
+ <interface type='bridge'>
+ <mac address='00:50:56:91:48:c7'/>
+ <source bridge='vm network'/>
+ </interface>
+ </devices>
+</domain>
diff --git a/tests/vmx2xmltest.c b/tests/vmx2xmltest.c
index b4eb5d5..4c93059 100644
--- a/tests/vmx2xmltest.c
+++ b/tests/vmx2xmltest.c
@@ -26,6 +26,7 @@ testCompareFiles(const char *vmx, const char *xml, esxVI_APIVersion apiVersion)
char *vmxPtr = &(vmxData[0]);
char *xmlPtr = &(xmlData[0]);
virDomainDefPtr def = NULL;
+ virErrorPtr err = NULL;
if (virtTestLoadFile(vmx, &vmxPtr, MAX_FILE) < 0) {
goto failure;
@@ -39,12 +40,16 @@ testCompareFiles(const char *vmx, const char *xml, esxVI_APIVersion apiVersion)
apiVersion);
if (def == NULL) {
+ err = virGetLastError();
+ fprintf(stderr, "ERROR: %s\n", err != NULL ? err->message : "<unknown>");
goto failure;
}
formatted = virDomainDefFormat(def, VIR_DOMAIN_XML_SECURE);
if (formatted == NULL) {
+ err = virGetLastError();
+ fprintf(stderr, "ERROR: %s\n", err != NULL ? err->message : "<unknown>");
goto failure;
}
@@ -117,6 +122,10 @@ mymain(int argc, char **argv)
} \
} while (0)
+
+ DO_TEST("case-insensitive-1", "case-insensitive-1", esxVI_APIVersion_25);
+ DO_TEST("case-insensitive-2", "case-insensitive-2", esxVI_APIVersion_25);
+
DO_TEST("minimal", "minimal", esxVI_APIVersion_25);
DO_TEST("minimal-64bit", "minimal-64bit", esxVI_APIVersion_25);
--
1.6.3.3
15 years
[libvirt] [PATCH v3 00/14] Network filtering (ACL) extensions for libvirt
by stefanb@us.ibm.com
Hi!
This is a repost of this set of patches with some of the suggested fixes applied and ipv6 support on the ebtables layer added.
The following set of patches add network filtering (ACL) extensions to
libvirt and enable network traffic filtering for VMs using ebtables and,
depending on the networking technology being used (tap, but not
macvtap), also iptables. Usage of either is optional and controlled
through filters that a VM is referencing.
The ebtables-level filtering is based on the XML derived from the CIM
network slide 10 (filtering) from the DMTF website
(http://www.dmtf.org/standards/cim/cim_schema_v2230/CIM_Network.pdf).
The XML we derived from this was discussed on the list before. On the
ebtables level we currently handle filtering of IPv4 and ARP traffic.
The iptables-level filtering is based on similar XML where XML nodes
described the particular protocol to filter for. Its extensions enable
the filtering of traffic using iptables for tcp, udp, icmp, igmp, sctp
and 'all' types of traffic. This list of protocols maps to the features
supported by iptables and only excludes protocols like 'esp', 'ah' and
'udplite'. Currently only bridging mode is supported and based on
availability of the physdev match.
The filtering framework adds new libvirt virsh commands for managing
the filters. The 5 new commands are:
- virsh nwfilter-list
- virsh nwfilter-dumpxml <name of filter>
- virsh nwfilter-define <name of file containing filter desc.>
- virsh nwfilter-undefine <name of filter>
- virsh nwfilter-edit <name of filter>
Above commands are similar to commands for already existing pools and as
such much of the code directly related to the above commands could be
borrowed from other drivers.
The network filters can either contain rules using the above mentioned
XML or contain references to other filters in order to build more
complex filters that form some sort of filter tree or can contain both.
An example for a filter referencing other filters would be this one
here:
<filter name='demofilter4' chain='root'>
<uuid>66f62d1d-34c1-1421-824f-c62d5ee5e8b6</uuid>
<filterref filter='no-mac-spoofing'/>
<filterref filter='no-mac-broadcast'/>
<filterref filter='no-arp-spoofing'/>
<filterref filter='allow-dhcp'>
<parameter name='DHCPSERVER' value='10.0.0.1'/>
</filterref>
<filterref filter='no-other-l2-traffic'/>
<filterref filter='recv-only-vm-ipaddress'/>
<filterref filter='recv-only-vm-macaddress'/>
<filterref filter='l3-test'/>
<filterref filter='ipv6test'/>
</filter>
A filter containing actual rules would look like this:
<filter name='no-mac-broadcast' chain='ipv4'>
<uuid>ffe2ccd6-edec-7360-1852-6b5ccb553234</uuid>
<rule action='drop' direction='out' priority='500'>
<mac dstmacaddr='ff:ff:ff:ff:ff:ff'/>
</rule>
</filter>
The filter XML now also holds a priority attribute in the rule. This
provides control over the ordering of the applied ebtables/iptables
rules beyond their appearance in the XML.
The domain XML has been extended to reference a top level filter from
within each <interface> XML node. A valid reference to such a top level
filter looks like this:
<interface type='bridge'>
<source bridge='static'/>
<filterref filter='demofilter4'>
<parameter name='IP' value='9.59.241.151'/>
</filterref>
</interface>
In this XML a parameter IP is passed for instantiation of the referenced
filters, that may require the availability of this parameter. In the
above case the IP parameter's value is the IP address of the VM, and it
enables the instantiation of those filters that require this 'IP'
variable. If a filter requires a parameter that
is not provided, the VM will not start or the interface will not attach
to a running VM. Any names of parameters can be provided for
instantiation of filters and their names and values only need to pass a
regular expression test. In a subsequent patch we will be adding
capability to allow users to omit the IP parameter (only) and enable
libvirt to learn the IP address of the VM and have it instantiate the
filter once it knows it.
While virtual machines are running, it is possible to update their
filters. For that all running VMs' filter 'trees' are traversed to
detect whether the updated filter is referenced by the VM. If so, its
ebtables/iptables rules are applied. If any one VM's update fails,
all updates are rolled back and the filter XML update is rejected.
One comment about the instantiation of the rules: Since the XML allows
to create nearly any possible combination of parameters to ebtables or
iptables commands, I haven't used the ebtables or iptables wrappers.
Instead, I am writing ebtables/iptables command into a buffer, add
command line options to each one of them as described in the rule's XML,
write the buffer into a file and run it as a script. For those commands
that are not allowed to fail I am using the following format to run
them:
cmd="ebtables <some options>"
r=`${cmd}`
if [ $? -ne 0 ]; then
echo "Failure in command ${cmd}."
exit 1
fi
cmd="..."
[...]
If one of the commands fails in such a batch, the libvirt code is going to
pick up the error code '1', tear down anything previously established
and report an error back. The actual error message shown above is
currently not reported back, but can be later on with some changes to
the commands running external programs that need to read the script's
stdout.
One comment on patch 14: it currently #include's a .c file into another
.c file, only so that I don't have to change too much code whenever I
change code in the underlying patch. So this has to be changed. The
patch series works without patch 13, but then only supports ebtables.
The patches apply to the current tip. They pass 'make syntax-check' and
have been frequently run in valgrind for memory leak checks.
Looking forward to your feedback on the patches.
Thanks and regards,
Stefan and Gerhard
15 years
[libvirt] [PATCH][QEMU driver] Catch cdrom change error
by Ryan Harper
Currently when we attempt to change the cdrom in a qemu VM the monitor
doesn't generate an error if the target filename doesn't exist. I've
submitted a patch[1] for this. This patch is the libvirt qemu-driver
side which catches the error message from the monitor and reports the
error to libvirt. This means that virsh attach-disk cdrom commands
won't appear to succeed when the qemu change command actually failed.
I've tested this patch on an older libvirt (rebuild libvirt-0.7.0 on my
ubuntu host) but haven't rebuilt against git head since I can't
quite compile that on my host.
1. http://lists.gnu.org/archive/html/qemu-devel/2010-03/msg01935.html
Signed-off-by: Ryan Harper <ryanh(a)us.ibm.com>
diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c
index 1596e59..f868907 100644
--- a/src/qemu/qemu_monitor_text.c
+++ b/src/qemu/qemu_monitor_text.c
@@ -905,7 +905,14 @@ int qemuMonitorTextChangeMedia(qemuMonitorPtr mon,
qemuReportError(VIR_ERR_OPERATION_FAILED,
_("could not eject media on %s: %s"), devname, reply);
goto cleanup;
- }
+ }
+
+ /* Could not open message indicates bad filename */
+ if (strstr(reply, "\nCould not open ")) {
+ qemuReportError(VIR_ERR_OPERATION_FAILED,
+ _("could not change media on %s: %s"), devname, reply);
+ goto cleanup;
+ }
ret = 0;
--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
ryanh(a)us.ibm.com
15 years
[libvirt] [PATCH 00/13] Add support for all QEMU event notifications
by Daniel P. Berrange
This series adds support for (nearly) all QEMU event notifications
that are currently supported by the new JSON mode monitor.
- Guest reboot
- RTC adjustment
- Watchdog firing
- Block IO errors
- Graphics client connect/disconnect
It required two new public APIs, and a bunch of extra function
callback typedefs which will be shown in each following commit
message.
This scheme is extensible to support any further events against
domains.
It does not support events on other types of object (storage,
node devices, etc), but those can follow exactly the same API
design scheme should we need them in the future. We don't want
to unify them at the public API level, since internally they
need to dispatch to separate drivers anyway.
I'm generally happy with the public API for the events, with
the exception of the graphics event which is horribly complex
due to the large amount of data. I'm not sure if there's a way
to make that event nicer....
The full diffstat for all patches
daemon/dispatch.h | 10
daemon/libvirtd.c | 20
daemon/libvirtd.h | 2
daemon/remote.c | 348 ++++++++++++++--
daemon/remote_dispatch_args.h | 2
daemon/remote_dispatch_prototypes.h | 16
daemon/remote_dispatch_table.h | 37 +
examples/domain-events/events-c/event-test.c | 168 ++++++-
include/libvirt/libvirt.h.in | 106 ++++
src/conf/domain_event.c | 580 ++++++++++++++++++++++++---
src/conf/domain_event.h | 82 +++
src/driver.c | 4
src/driver.h | 18
src/esx/esx_driver.c | 2
src/libvirt.c | 133 +++++-
src/libvirt_private.syms | 16
src/libvirt_public.syms | 6
src/lxc/lxc_driver.c | 60 ++
src/opennebula/one_driver.c | 2
src/openvz/openvz_driver.c | 2
src/phyp/phyp_driver.c | 2
src/qemu/qemu_driver.c | 290 +++++++++++++
src/qemu/qemu_monitor.c | 77 +++
src/qemu/qemu_monitor.h | 39 +
src/qemu/qemu_monitor_json.c | 144 ++++++
src/remote/remote_driver.c | 459 +++++++++++++++++----
src/remote/remote_protocol.c | 111 +++++
src/remote/remote_protocol.h | 185 ++++++--
src/remote/remote_protocol.x | 65 ++-
src/test/test_driver.c | 58 ++
src/uml/uml_driver.c | 2
src/vbox/vbox_tmpl.c | 155 +++++--
src/xen/xen_driver.c | 71 ++-
src/xenapi/xenapi_driver.c | 2
34 files changed, 2947 insertions(+), 327 deletions(-)
Daniel
15 years
[libvirt] [PATCH 0/1] Disk error policy
by David Allan
The following patch adds support for setting the disk error policy to the domain XML and using it when setting up the qemu command line. It specifies the werror=somepolicy,rerror=somepolicy flags to tell qemu what to do in the case of an io error. This patch is my first foray into the setup of the qemu command line, so please review with heightened care.
Dave
David Allan (1):
Add disk error policy to domain XML
docs/schemas/domain.rng | 12 +++++++++++-
src/conf/domain_conf.c | 15 +++++++++++++++
src/conf/domain_conf.h | 10 ++++++++++
src/libvirt_private.syms | 2 +-
src/qemu/qemu_conf.c | 12 +++++++++---
tests/qemuhelptest.c | 1 +
tests/qemuxml2argvtest.c | 3 +++
7 files changed, 50 insertions(+), 5 deletions(-)
15 years
[libvirt] [PATCH] Add entry point logging for cpu functions
by Jiri Denemark
---
src/cpu/cpu.c | 46 ++++++++++++++++++++++++++++++++++++++++++++--
1 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/src/cpu/cpu.c b/src/cpu/cpu.c
index be0f15e..183862a 100644
--- a/src/cpu/cpu.c
+++ b/src/cpu/cpu.c
@@ -23,6 +23,7 @@
#include <config.h>
+#include "logging.h"
#include "memory.h"
#include "xml.h"
#include "cpu.h"
@@ -73,6 +74,8 @@ cpuCompareXML(virCPUDefPtr host,
virCPUDefPtr cpu = NULL;
virCPUCompareResult ret = VIR_CPU_COMPARE_ERROR;
+ VIR_DEBUG("host=%p, xml=%s", host, NULLSTR(xml));
+
if (!(doc = virXMLParseString(xml, "cpu.xml")))
goto cleanup;
@@ -104,6 +107,8 @@ cpuCompare(virCPUDefPtr host,
{
struct cpuArchDriver *driver;
+ VIR_DEBUG("host=%p, cpu=%p", host, cpu);
+
if ((driver = cpuGetSubDriver(host->arch)) == NULL)
return VIR_CPU_COMPARE_ERROR;
@@ -126,6 +131,13 @@ cpuDecode(virCPUDefPtr cpu,
{
struct cpuArchDriver *driver;
+ VIR_DEBUG("cpu=%p, data=%p, nmodels=%u", cpu, data, nmodels);
+ if (models) {
+ unsigned int i;
+ for (i = 0; i < nmodels; i++)
+ VIR_DEBUG("models[%u]=%s", i, NULLSTR(models[i]));
+ }
+
if (models == NULL && nmodels != 0) {
virCPUReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("nonzero nmodels doesn't match with NULL models"));
@@ -163,6 +175,11 @@ cpuEncode(const char *arch,
{
struct cpuArchDriver *driver;
+ VIR_DEBUG("arch=%s, cpu=%p, forced=%p, required=%p, "
+ "optional=%p, disabled=%p, forbidden=%p",
+ NULLSTR(arch), cpu, forced, required,
+ optional, disabled, forbidden);
+
if ((driver = cpuGetSubDriver(arch)) == NULL)
return -1;
@@ -184,6 +201,8 @@ cpuDataFree(const char *arch,
{
struct cpuArchDriver *driver;
+ VIR_DEBUG("arch=%s, data=%p", NULLSTR(arch), data);
+
if (data == NULL)
return;
@@ -206,6 +225,8 @@ cpuNodeData(const char *arch)
{
struct cpuArchDriver *driver;
+ VIR_DEBUG("arch=%s", NULLSTR(arch));
+
if ((driver = cpuGetSubDriver(arch)) == NULL)
return NULL;
@@ -227,6 +248,8 @@ cpuGuestData(virCPUDefPtr host,
{
struct cpuArchDriver *driver;
+ VIR_DEBUG("host=%p, guest=%p, data=%p", host, guest, data);
+
if ((driver = cpuGetSubDriver(host->arch)) == NULL)
return VIR_CPU_COMPARE_ERROR;
@@ -254,6 +277,16 @@ cpuBaselineXML(const char **xmlCPUs,
char *cpustr;
unsigned int i;
+ VIR_DEBUG("ncpus=%u, nmodels=%u", ncpus, nmodels);
+ if (xmlCPUs) {
+ for (i = 0; i < ncpus; i++)
+ VIR_DEBUG("xmlCPUs[%u]=%s", i, NULLSTR(xmlCPUs[i]));
+ }
+ if (models) {
+ for (i = 0; i < nmodels; i++)
+ VIR_DEBUG("models[%u]=%s", i, NULLSTR(models[i]));
+ }
+
if (xmlCPUs == NULL && ncpus != 0) {
virCPUReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("nonzero ncpus doesn't match with NULL xmlCPUs"));
@@ -320,6 +353,17 @@ cpuBaseline(virCPUDefPtr *cpus,
{
struct cpuArchDriver *driver;
virCPUDefPtr cpu;
+ unsigned int i;
+
+ VIR_DEBUG("ncpus=%u, nmodels=%u", ncpus, nmodels);
+ if (cpus) {
+ for (i = 0; i < ncpus; i++)
+ VIR_DEBUG("cpus[%u]=%p", i, cpus[i]);
+ }
+ if (models) {
+ for (i = 0; i < nmodels; i++)
+ VIR_DEBUG("models[%u]=%s", i, NULLSTR(models[i]));
+ }
if (cpus == NULL && ncpus != 0) {
virCPUReportError(VIR_ERR_INTERNAL_ERROR,
@@ -349,8 +393,6 @@ cpuBaseline(virCPUDefPtr *cpus,
}
if ((cpu = driver->baseline(cpus, ncpus, models, nmodels))) {
- int i;
-
cpu->type = VIR_CPU_TYPE_GUEST;
cpu->match = VIR_CPU_MATCH_EXACT;
VIR_FREE(cpu->arch);
--
1.7.0.3
15 years