Devel
Threads by month
- ----- 2026 -----
- April
- March
- February
- January
- ----- 2025 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2008 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2007 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2006 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2005 -----
- December
- 24 participants
- 40181 discussions
To find out where the net type 'direct' needs to be handled I introduced
the 'enum virDomainNetType' in the virDomainNetDef structure and let the
compiler tell me where the case statement is missing. Then I added the
unhandled device statement to the UML driver.
Signed-off-by: Stefan Berger <stefanb(a)us.ibm.com>
Index: libvirt-plain/src/conf/domain_conf.h
===================================================================
--- libvirt-plain.orig/src/conf/domain_conf.h
+++ libvirt-plain/src/conf/domain_conf.h
@@ -251,7 +251,7 @@ enum virDomainNetdevMacvtapType {
typedef struct _virDomainNetDef virDomainNetDef;
typedef virDomainNetDef *virDomainNetDefPtr;
struct _virDomainNetDef {
- int type;
+ enum virDomainNetType type;
unsigned char mac[VIR_MAC_BUFLEN];
char *model;
union {
Index: libvirt-plain/src/lxc/lxc_driver.c
===================================================================
--- libvirt-plain.orig/src/lxc/lxc_driver.c
+++ libvirt-plain/src/lxc/lxc_driver.c
@@ -800,6 +800,16 @@ static int lxcSetupInterfaces(virConnect
case VIR_DOMAIN_NET_TYPE_BRIDGE:
bridge = def->nets[i]->data.bridge.brname;
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_ETHERNET:
+ case VIR_DOMAIN_NET_TYPE_SERVER:
+ case VIR_DOMAIN_NET_TYPE_CLIENT:
+ case VIR_DOMAIN_NET_TYPE_MCAST:
+ case VIR_DOMAIN_NET_TYPE_INTERNAL:
+ case VIR_DOMAIN_NET_TYPE_DIRECT:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
DEBUG("bridge: %s", bridge);
Index: libvirt-plain/src/qemu/qemu_conf.c
===================================================================
--- libvirt-plain.orig/src/qemu/qemu_conf.c
+++ libvirt-plain/src/qemu/qemu_conf.c
@@ -2686,6 +2686,14 @@ qemuBuildHostNetStr(virDomainNetDefPtr n
net->data.socket.address,
net->data.socket.port);
break;
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_ETHERNET:
+ case VIR_DOMAIN_NET_TYPE_NETWORK:
+ case VIR_DOMAIN_NET_TYPE_BRIDGE:
+ case VIR_DOMAIN_NET_TYPE_INTERNAL:
+ case VIR_DOMAIN_NET_TYPE_DIRECT:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
type_sep = ',';
break;
Index: libvirt-plain/src/uml/uml_conf.c
===================================================================
--- libvirt-plain.orig/src/uml/uml_conf.c
+++ libvirt-plain/src/uml/uml_conf.c
@@ -244,6 +244,14 @@ umlBuildCommandLineNet(virConnectPtr con
umlReportError(conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR, "%s",
_("internal networking type not supported"));
goto error;
+
+ case VIR_DOMAIN_NET_TYPE_DIRECT:
+ umlReportError(conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR, "%s",
+ _("direct networking type not supported"));
+ goto error;
+
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
virBufferVSprintf(&buf, ",%02x:%02x:%02x:%02x:%02x:%02x",
Index: libvirt-plain/src/conf/domain_conf.c
===================================================================
--- libvirt-plain.orig/src/conf/domain_conf.c
+++ libvirt-plain/src/conf/domain_conf.c
@@ -450,6 +450,10 @@ void virDomainNetDefFree(virDomainNetDef
case VIR_DOMAIN_NET_TYPE_DIRECT:
VIR_FREE(def->data.direct.linkdev);
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
VIR_FREE(def->ifname);
@@ -1740,7 +1744,7 @@ virDomainNetDefParseXML(virCapsPtr caps,
type = virXMLPropString(node, "type");
if (type != NULL) {
- if ((def->type = virDomainNetTypeFromString(type)) < 0) {
+ if ((int)(def->type = virDomainNetTypeFromString(type)) < 0) {
virDomainReportError(VIR_ERR_INTERNAL_ERROR,
_("unknown interface type '%s'"), type);
goto error;
@@ -1949,6 +1953,10 @@ virDomainNetDefParseXML(virCapsPtr caps,
dev = NULL;
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
if (ifname != NULL) {
@@ -4861,6 +4869,10 @@ virDomainNetDefFormat(virBufferPtr buf,
virDomainNetdevMacvtapTypeToString(def->data.direct.mode));
virBufferAddLit(buf, "/>\n");
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
if (def->ifname)
2
1
Re: [libvirt] [Qemu-devel] Re: Supporting hypervisor specific APIs in libvirt
by Anthony Liguori 26 Mar '10
by Anthony Liguori 26 Mar '10
26 Mar '10
On 03/23/2010 10:57 AM, Paul Brook wrote:
>>> I think there is a serious divergence of approach there, instanciating
>>> API stating 'we are gonna deprecate them sooner or later' tell the
>>> application developper 'my time is more important than yours' and not
>>> really something I like to carry to the API users.
>>> The main goal of libvirt remains to provide APIs needed to unify the
>>> development of the virtualization layers. Having APIs which makes
>>> sense only for one or 2 virtualization engines is not a problem in
>>> itself, it just raises questions about the actual semantic of that API.
>>> If that semantic is sound, then I see no reason to not add it, really
>>> and we actually often do.
>>>
>> Yeah, but the problem we're facing is, I want there to be an API added
>> to the management layer as part of the feature commit in qemu. If there
>> has to be a discussion and decisions about how to model the API, it's
>> not going to be successful.
>>
> I thought the monitor protocol *was* our API. If not, why not?
>
It is. But our API is missing key components like guest enumeration.
So the fundamental topic here is, do we introduce these missing
components to allow people to build directly to our interface or do we
make use of the functionality that libvirt already provides if they can
plumb our API directly to users.
Regards,
Anthony Liguori
> Paul
>
10
65
26 Mar '10
In the web documentation mention that the direct device support is there
since libvirt 0.7.7. A Linux kernel 2.6.34 is required for macvtap to be
available as standard device.
Index: libvirt-plain/docs/formatdomain.html.in
===================================================================
--- libvirt-plain.orig/docs/formatdomain.html.in
+++ libvirt-plain/docs/formatdomain.html.in
@@ -741,8 +741,11 @@
<p>
Provides direct attachment of the virtual machine's NIC to the given
- physial interface of the host. This setup requires the Linux macvtap
- driver to be available. One of the modes 'vepa'
+ physical interface of the host.
+ <span class="since">Since 0.7.7 (QEMU and KVM only)</span><br>
+ This setup requires the Linux macvtap
+ driver to be available. <span class="since">(Since Linux 2.6.34.)</span>
+ One of the modes 'vepa'
( <a href="http://www.ieee802.org/1/files/public/docs2009/new-evb-congdon-vepa-modular…">
'Virtual Ethernet Port Aggregator'</a>), 'bridge' or 'private'
can be chosen for the operation mode of the macvtap device, 'vepa'
2
1
[libvirt] [PATCH] Fix QEMU cpu affinity at startup to include all threads
by Daniel P. Berrange 26 Mar '10
by Daniel P. Berrange 26 Mar '10
26 Mar '10
The QEMU cpu affinity is used in NUMA scenarios to ensure that
guest memory is allocated from a specific node. Normally memory
is allocated on demand in vCPU threads, but when using hugepages
the initial thread leader allocates memory upfront. libvirt was
not setting affinity of the thread leader, or I/O threads. This
patch changes the code to set the process affinity in between
the fork()/exec() of QEMU. This ensures that every single QEMU
thread gets the affinity.
* src/qemu/qemu_driver.c: Set affinity on entire QEMU process
at startup
---
src/qemu/qemu_driver.c | 29 ++++++++++++++++-------------
1 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 257f914..2598deb 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1701,6 +1701,9 @@ qemuDetectVcpuPIDs(struct qemud_driver *driver,
return 0;
}
+/*
+ * To be run between fork/exec of QEMU only
+ */
static int
qemudInitCpuAffinity(virDomainObjPtr vm)
{
@@ -1708,7 +1711,8 @@ qemudInitCpuAffinity(virDomainObjPtr vm)
virNodeInfo nodeinfo;
unsigned char *cpumap;
int cpumaplen;
- qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ DEBUG0("Setting CPU affinity");
if (nodeGetInfo(NULL, &nodeinfo) < 0)
return -1;
@@ -1740,14 +1744,14 @@ qemudInitCpuAffinity(virDomainObjPtr vm)
VIR_USE_CPU(cpumap, i);
}
- /* The XML config only gives a per-VM affinity, so we apply
- * the same mapping to all vCPUs */
- for (i = 0 ; i < priv->nvcpupids ; i++) {
- if (virProcessInfoSetAffinity(priv->vcpupids[i],
- cpumap, cpumaplen, maxcpu) < 0) {
- VIR_FREE(cpumap);
- return -1;
- }
+ /* We are assuming we are running between fork/exec of QEMU, so
+ * that getpid() gives the QEMU process ID and we know that
+ * no threads are running.
+ */
+ if (virProcessInfoSetAffinity(getpid(),
+ cpumap, cpumaplen, maxcpu) < 0) {
+ VIR_FREE(cpumap);
+ return -1;
}
VIR_FREE(cpumap);
@@ -2653,6 +2657,9 @@ struct qemudHookData {
static int qemudSecurityHook(void *data) {
struct qemudHookData *h = data;
+ if (qemudInitCpuAffinity(h->vm) < 0)
+ return -1;
+
if (qemuAddToCgroup(h->driver, h->vm->def) < 0)
return -1;
@@ -2943,10 +2950,6 @@ static int qemudStartVMDaemon(virConnectPtr conn,
if (qemuDetectVcpuPIDs(driver, vm) < 0)
goto abort;
- DEBUG0("Setting CPU affinity");
- if (qemudInitCpuAffinity(vm) < 0)
- goto abort;
-
DEBUG0("Setting any required VM passwords");
if (qemuInitPasswords(conn, driver, vm, qemuCmdFlags) < 0)
goto abort;
--
1.6.2.5
2
1
[libvirt] [PATCH][Network] Make dhcp service enabled only if //ip/dhcp exists in network xml
by Satoru SATOH 26 Mar '10
by Satoru SATOH 26 Mar '10
26 Mar '10
Libvirtd enables the DHCP service on virtual networks even if the element
'//ip/dhcp' does not exist in the network xml. The following patch fixes
this problem.
Signed-off-by: Satoru SATOH <satoru.satoh(a)gmail.com>
---
src/conf/network_conf.c | 2 ++
src/conf/network_conf.h | 2 ++
src/network/bridge_driver.c | 5 +----
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/conf/network_conf.c b/src/conf/network_conf.c
index 1f3a44c..e41775a 100644
--- a/src/conf/network_conf.c
+++ b/src/conf/network_conf.c
@@ -466,6 +466,8 @@ virNetworkDefParseXML(xmlXPathContextPtr ctxt)
if ((ip = virXPathNode("./ip[1]", ctxt)) &&
virNetworkIPParseXML(def, ip) < 0)
goto error;
+
+ def->dhcp = (virXPathNode("./ip/dhcp", ctxt) != NULL ? 1 : 0);
}
diff --git a/src/conf/network_conf.h b/src/conf/network_conf.h
index 127a23a..847ddd3 100644
--- a/src/conf/network_conf.h
+++ b/src/conf/network_conf.h
@@ -74,6 +74,8 @@ struct _virNetworkDef {
char *netmask;
char *network;
+ unsigned int dhcp;
+
unsigned int nranges; /* Zero or more dhcp ranges */
virNetworkDHCPRangeDefPtr ranges;
diff --git a/src/network/bridge_driver.c b/src/network/bridge_driver.c
index 83ab00e..6dcf7b4 100644
--- a/src/network/bridge_driver.c
+++ b/src/network/bridge_driver.c
@@ -943,12 +943,9 @@ static int networkStartNetworkDaemon(struct network_driver *driver,
goto err_delbr2;
}
- if ((network->def->ipAddress ||
- network->def->nranges) &&
- dhcpStartDhcpDaemon(network) < 0)
+ if (network->def->dhcp && dhcpStartDhcpDaemon(network) < 0)
goto err_delbr2;
-
/* Persist the live configuration now we have bridge info */
if (virNetworkSaveConfig(NETWORK_STATE_DIR, network->def) < 0) {
goto err_kill;
--
1.6.2.5
2
2
[libvirt] [PATCH 1/1] XenAPI remote storage support on libvirt
by Sharadha Prabhakar (3P) 26 Mar '10
by Sharadha Prabhakar (3P) 26 Mar '10
26 Mar '10
This patch contains the APIs to support XenAPI remote storage on libvirt.
This patch allows you to list storage pools, storage volumes, get information
about storage pools and volumes and create storage pools of type NETFS
with format type nfs,cifs-iso,nfs-iso using virsh. You can also create
VMs with storage pools attached and destroy storage pools.
While creating a VM with storage, the disk tag's source element should be
of the form '/storage pool uuid/storage volume uuid'.
--- ./libvirt_org/src/xenapi/xenapi_storage_driver.c 1970-01-01 01:00:00.000000000 +0100
+++ ./libvirt/src/xenapi/xenapi_storage_driver.c 2010-03-24 15:27:43.000000000 +0000
@@ -0,0 +1,1499 @@
+/*
+ * xenapi_storage_driver.c: Xen API storage driver APIs
+ * Copyright (C) 2009, 2010 Citrix Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Sharadha Prabhakar <sharadha.prabhakar(a)citrix.com>
+ */
+
+#include <config.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <libxml/uri.h>
+#include <xen_internal.h>
+#include <libxml/parser.h>
+#include <curl/curl.h>
+#include <xen/api/xen_common.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_all.h>
+#include <xen/api/xen_vm_metrics.h>
+#include <xen/api/xen_api_failure.h>
+#include <xen/dom0_ops.h>
+
+#include "libvirt_internal.h"
+#include "libvirt/libvirt.h"
+#include "virterror_internal.h"
+#include "storage_conf.h"
+#include "datatypes.h"
+#include "xenapi_driver.h"
+#include "util.h"
+#include "uuid.h"
+#include "authhelper.h"
+#include "memory.h"
+#include "driver.h"
+#include "util/logging.h"
+#include "buf.h"
+#include "xenapi_utils.h"
+#include "xenapi_storage_driver.h"
+
+/*
+*XenapiStorageOpen
+*
+*Authenticates and creates a session with the server
+*Returns VIR_DRV_OPEN_SUCCESS on success, else VIR_DRV_OPEN_ERROR
+*/
+static virDrvOpenStatus
+xenapiStorageOpen (virConnectPtr conn, virConnectAuthPtr auth, int flags ATTRIBUTE_UNUSED)
+{
+ char *username = NULL;
+ char *password = NULL;
+ struct _xenapiStoragePrivate *privP = NULL;
+
+ if (conn->uri == NULL || conn->uri->scheme == NULL ||
+ STRCASENEQ(conn->uri->scheme, "XenAPI")) {
+ return VIR_DRV_OPEN_DECLINED;
+ }
+
+ if (conn->uri->server == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Server name not in URI");
+ goto error;
+ }
+
+ if (auth == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Authentication Credentials not found");
+ goto error;
+ }
+
+ if (conn->uri->user != NULL) {
+ username = strdup(conn->uri->user);
+
+ if (username == NULL) {
+ virReportOOMError();
+ goto error;
+ }
+ } else {
+ username = virRequestUsername(auth, NULL, conn->uri->server);
+
+ if (username == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Username request failed");
+ goto error;
+ }
+ }
+
+ password = virRequestPassword(auth, username, conn->uri->server);
+
+ if (password == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Password request failed");
+ goto error;
+ }
+
+ if (VIR_ALLOC(privP) < 0) {
+ virReportOOMError();
+ goto error;
+ }
+
+ if (virAsprintf(&privP->url, "https://%s", conn->uri->server) < 0) {
+ virReportOOMError();
+ goto error;
+ }
+
+ if (xenapiUtil_ParseQuery(conn, conn->uri, &privP->noVerify) < 0)
+ goto error;
+
+ xmlInitParser();
+ xmlKeepBlanksDefault(0);
+ xen_init();
+ curl_global_init(CURL_GLOBAL_ALL);
+
+ privP->session = xen_session_login_with_password(call_func, privP, username,
+ password, xen_api_latest_version);
+
+ if (privP->session != NULL && privP->session->ok) {
+ conn->storagePrivateData = privP;
+ VIR_FREE(username);
+ VIR_FREE(password);
+ return VIR_DRV_OPEN_SUCCESS;
+ }
+
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED, "");
+
+ error:
+ VIR_FREE(username);
+ VIR_FREE(password);
+
+ if (privP != NULL) {
+ if (privP->session != NULL)
+ xenSessionFree(privP->session);
+
+ VIR_FREE(privP->url);
+ VIR_FREE(privP);
+ }
+
+ return VIR_DRV_OPEN_ERROR;
+}
+
+
+/*
+*XenapiStorageClose
+*
+*Closes the session with the server
+*Returns 0 on success
+*/
+static int
+xenapiStorageClose (virConnectPtr conn)
+{
+ struct _xenapiStoragePrivate *priv = (struct _xenapiStoragePrivate *)conn->storagePrivateData;
+ xen_session_logout(priv->session);
+ VIR_FREE(priv->url);
+ VIR_FREE(priv);
+ return 0;
+
+}
+
+/*
+*XenapiNumOfStoragePools
+*
+*Provides the number of active storage pools
+*Returns number of pools found on success, or -1 on error
+*/
+static int
+xenapiNumOfStoragePools (virConnectPtr conn)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ bool currently_attached;
+ int cnt=-1,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; i<sr_set->size; i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, ¤tly_attached, pbd_set->contents[0]);
+ if (currently_attached == 1) cnt++;
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return cnt;
+}
+
+/*
+*XenapiListStoragePools
+*
+*Provides the list of names of active storage pools up to maxnames
+*Returns the number of names in the list on success, or -1 on error
+*/
+static int
+xenapiListStoragePools (virConnectPtr conn, char **const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ char *usenames=NULL;
+ bool currently_attached;
+ int count=0,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; (i<sr_set->size) && (count<maxnames); i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, ¤tly_attached, pbd_set->contents[0]);
+ if (currently_attached == 1) {
+ if(!(usenames = strdup(record->name_label))) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ names[count++] = usenames;
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ goto cleanup;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+ cleanup:
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ while (--count>=0) VIR_FREE(names[count]);
+ return -1;
+}
+
+
+/*
+*XenapiListDefinedStoragePools
+*
+*Provides the list of names of inactive storage pools up to maxnames
+*
+*/
+static int
+xenapiListDefinedStoragePools (virConnectPtr conn, char **const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ char *usenames=NULL;
+ bool currently_attached;
+ int count=0,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; (i<sr_set->size) && (count<maxnames); i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, ¤tly_attached, pbd_set->contents[0]);
+ if (currently_attached == 0) {
+ if(!(usenames = strdup(record->name_label))) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ names[count++] = usenames;
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ goto cleanup;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+
+ cleanup:
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ while (--count>=0) free(names[count]);
+ return -1;
+}
+
+
+/*
+*XenapiNumOfDefinedStoragePools
+*
+*Provides the number of inactive storage pools
+*
+*/
+static int
+xenapiNumOfDefinedStoragePools (virConnectPtr conn)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ int cnt=-1,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; i<sr_set->size; i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ bool currently_attached;
+ xen_pbd_get_currently_attached(session, ¤tly_attached, pbd_set->contents[0]);
+ if (currently_attached == 0) cnt++;
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return cnt;
+}
+
+/*
+*XenapiStoragePoolCreateXML
+*
+*Creates a Storage Pool from the given XML
+* Only storage pool type NETFS is supported for now
+*/
+static virStoragePoolPtr
+xenapiStoragePoolCreateXML (virConnectPtr conn, const char *xmlDesc,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ virStoragePoolDefPtr pdef = NULL;
+ char *pooltype=NULL;
+ xen_sr sr=NULL;
+ xen_host host=NULL;
+ virBuffer path = VIR_BUFFER_INITIALIZER;
+ xen_string_string_map *device_config=NULL,*smconfig=NULL;
+ virStoragePoolPtr poolPtr = NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_sr_record *sr_record = NULL;
+ xen_session *session = ((struct _xenapiPrivate *)(conn->privateData))->session;
+ if(!(pdef = virStoragePoolDefParseString(xmlDesc))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't parse XML");
+ virBufferFreeAndReset(&path);
+ return NULL;
+ }
+ if (pdef->type != VIR_STORAGE_POOL_NETFS) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Only Pool type NETFS is currently supported");
+ goto cleanup;
+ } else {
+ if (pdef->source.format == VIR_STORAGE_POOL_NETFS_NFS_ISO) {
+ pooltype = (char *)"iso";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Host name required for creating NFS ISO SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating NFS ISO SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(1);
+ if (!(device_config->contents[0].key = strdup("location"))) {
+ goto cleanup_device_config;
+ }
+ virBufferVSprintf(&path,"%s:%s",pdef->source.host.name, pdef->source.dir);
+ device_config->contents[0].val = virBufferContentAndReset(&path);
+ smconfig = xen_string_string_map_alloc(0);
+ }
+ else if (pdef->source.format == VIR_STORAGE_POOL_NETFS_CIFS_ISO) {
+ pooltype = (char *)"iso";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Host name required for creating CIFS ISO SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating CIFS ISO SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(1);
+ if (!(device_config->contents[0].key = strdup("location")))
+ goto cleanup_device_config;
+
+ if (pdef->source.host.name[0] != '/') {
+ virBufferVSprintf(&path,"//%s%s",pdef->source.host.name, pdef->source.dir);
+ }
+ else {
+ virBufferVSprintf(&path,"%s%s",pdef->source.host.name, pdef->source.dir);
+ }
+ device_config->contents[0].val = virBufferContentAndReset(&path);
+ smconfig = xen_string_string_map_alloc(1);
+ if (!(smconfig->contents[0].key = strdup("iso_type"))) {
+ xen_string_string_map_free(smconfig);
+ xen_string_string_map_free(device_config);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ if (!(smconfig->contents[0].val = strdup("cifs"))) {
+ xen_string_string_map_free(smconfig);
+ xen_string_string_map_free(device_config);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ }
+ else if (pdef->source.format == VIR_STORAGE_POOL_NETFS_NFS) {
+ pooltype = (char *)"nfs";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Server name required for creating NFS SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating NFS SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(2);
+ if (!(device_config->contents[0].key = strdup("server")))
+ goto cleanup_device_config;
+ if (!(device_config->contents[0].val = strdup(pdef->source.host.name)))
+ goto cleanup_device_config;
+ if (!(device_config->contents[1].key = strdup("serverpath")))
+ goto cleanup_device_config;
+ if (!(device_config->contents[1].val = strdup(pdef->source.dir)))
+ goto cleanup_device_config;
+ smconfig = xen_string_string_map_alloc(0);
+ virBufferFreeAndReset(&path);
+ }
+ else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Format type of NETFS not supported by the hypervisor");
+ goto cleanup;
+ }
+ }
+ if (!xen_session_get_this_host(session, &host, session)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ if (!xen_sr_create(session, &sr, host, device_config, 0, pdef->name, (char *)"",
+ pooltype, (char *) "iso", true, smconfig)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ xen_host_free(host);
+ return NULL;
+ }
+ if (!xen_sr_get_record(session, &sr_record, sr)){
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ xen_host_free(host);
+ xen_sr_free(sr);
+ return NULL;
+ }
+ virUUIDParse(sr_record->uuid,raw_uuid);
+ poolPtr = virGetStoragePool(conn,(const char *)sr_record->name_label,raw_uuid);
+ if (!poolPtr) xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get a valid storage pool pointer");
+ virStoragePoolDefFree(pdef);
+ xen_sr_record_free(sr_record);
+ xen_host_free(host);
+ return poolPtr;
+
+ cleanup_device_config:
+ xen_string_string_map_free(device_config);
+
+ cleanup:
+ virStoragePoolDefFree(pdef);
+ virBufferFreeAndReset(&path);
+ return NULL;
+}
+
+static int
+xenapiStoragePoolBuild (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ return 0; /* return SUCCESS for now */
+}
+
+
+static int
+xenapiStoragePoolCreate (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+
+/*
+*XenapiStoragePoolSetAutostart
+*
+*Autostart option is always ON by default and is not allowed to be OFF
+*
+*/
+static int
+xenapiStoragePoolSetAutostart (virStoragePoolPtr pool, int autostart)
+{
+ virConnectPtr conn = pool->conn;
+ if (autostart == 1) {
+ VIR_DEBUG0("XenAPI storage pools autostart option is always ON by default");
+ return 0;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Hypervisor doesn't allow autostart to be OFF");
+ return -1;
+ }
+}
+
+
+/*
+ * xenapiStoragePoolGetAutostart
+ *
+ * Report the pool's autostart flag. XenAPI pools are always
+ * autostarted, so *autostart is unconditionally set to 1.
+ * Always returns 0.
+ */
+static int
+xenapiStoragePoolGetAutostart (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ int * autostart)
+{
+ *autostart=1; /* XenAPI storage pools always have autostart set to ON */
+ return 0;
+}
+
+
+/*
+ * xenapiStoragePoolLookupByName
+ *
+ * Lookup a storage pool (SR) by its name label, which must be unique.
+ * Returns a pool object on success, NULL on failure.
+ */
+static virStoragePoolPtr
+xenapiStoragePoolLookupByName (virConnectPtr conn,
+ const char * name)
+{
+ virStoragePoolPtr poolPtr=NULL;
+ xen_sr_record *record=NULL;
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_by_name_label(session, &sr_set, (char *)name) && sr_set->size>0) {
+ /* A name label may match several SRs; refuse ambiguous lookups. */
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ sr = sr_set->contents[0];
+ if (!xen_sr_get_record(session, &record, sr)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ /* The pool object is keyed by the SR's raw UUID. */
+ virUUIDParse(record->uuid,raw_uuid);
+ if (!(poolPtr = virGetStoragePool(conn,name,raw_uuid)))
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool pointer not available");
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return poolPtr;
+}
+
+
+/*
+ * xenapiStoragePoolGetXMLDesc
+ *
+ * Build and return the XML description of a storage pool (SR), looked
+ * up by the pool's UUID. For ISO SRs the NFS/CIFS host and directory
+ * are recovered from the PBD device-config "location" entry.
+ * Returns a heap-allocated XML string on success, NULL on failure.
+ *
+ * Fixes vs. the original: the sm_config scan no longer breaks out of
+ * the loop unconditionally after the first entry; host/path buffers
+ * are sized strlen+1 (the old size caused a one-byte sscanf overflow);
+ * the PBD set is checked before use; the SR handle is always freed.
+ */
+static char *
+xenapiStoragePoolGetXMLDesc (virStoragePoolPtr pool,
+                             unsigned int flags ATTRIBUTE_UNUSED)
+{
+    xen_sr_record *record = NULL;
+    xen_sr sr = NULL;
+    xen_pbd_set *pbd_set = NULL;
+    char *pathDetails = NULL, *host = NULL, *path = NULL, *xml = NULL;
+    virConnectPtr conn = pool->conn;
+    virStoragePoolDefPtr pdef = NULL;
+    xen_string_string_map *smconfig = NULL;
+    bool cifs = false;
+    xen_string_string_map *deviceConfig = NULL;
+    char uuidStr[VIR_UUID_STRING_BUFLEN];
+    int i;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    virUUIDFormat(pool->uuid, uuidStr);
+    if (!xen_sr_get_by_uuid(session, &sr, uuidStr)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        return NULL;
+    }
+    if (!xen_sr_get_record(session, &record, sr)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get SR information");
+        goto cleanup;
+    }
+    if (VIR_ALLOC(pdef) < 0) {
+        virReportOOMError();
+        goto cleanup;
+    }
+    /* Map the XenAPI SR type onto the closest libvirt pool type. */
+    if (STREQ(record->type, "nfs") || STREQ(record->type, "iso"))
+        pdef->type = VIR_STORAGE_POOL_NETFS;
+    else if (STREQ(record->type, "iscsi"))
+        pdef->type = VIR_STORAGE_POOL_ISCSI;
+    else if (STREQ(record->type, "file"))
+        pdef->type = VIR_STORAGE_POOL_DIR;
+    else if (STREQ(record->type, "lvm"))
+        pdef->type = VIR_STORAGE_POOL_LOGICAL;
+    else if (STREQ(record->type, "ext")) {
+        pdef->type = VIR_STORAGE_POOL_FS;
+        pdef->source.format = VIR_STORAGE_POOL_FS_EXT3;
+    } else if (STREQ(record->type, "hba"))
+        pdef->type = VIR_STORAGE_POOL_SCSI;
+
+    if (!(pdef->name = strdup(record->name_label))) {
+        virReportOOMError();
+        goto cleanup;
+    }
+    virUUIDParse(record->uuid, pdef->uuid);
+    /* XenAPI reports bytes; the pool definition stores kilobytes. */
+    pdef->allocation = (record->virtual_allocation) / 1024;
+    pdef->capacity = (record->physical_size) / 1024;
+    pdef->available = (record->physical_size - record->physical_utilisation) / 1024;
+
+    if (STREQ(record->type, "iso")) {
+        if (xen_sr_get_sm_config(session, &smconfig, sr)) {
+            cifs = false;
+            for (i = 0; i < smconfig->size; i++) {
+                if (STREQ(smconfig->contents[i].key, "iso_type")
+                    && STREQ(smconfig->contents[i].val, "cifs")) {
+                    cifs = true;
+                    break;  /* fix: only break once the key is found */
+                }
+            }
+            xen_string_string_map_free(smconfig);
+            smconfig = NULL;
+            /* fix: guard against a failed or empty PBD lookup */
+            if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size > 0) {
+                xen_pbd_get_device_config(session, &deviceConfig, pbd_set->contents[0]);
+                if (deviceConfig) {
+                    for (i = 0; i < deviceConfig->size; i++) {
+                        if (STREQ(deviceConfig->contents[i].key, "location")) {
+                            if (!(pathDetails = strdup(deviceConfig->contents[i].val))) {
+                                virReportOOMError();
+                                goto cleanup;
+                            }
+                            break;
+                        }
+                    }
+                }
+            }
+            if (pathDetails) {
+                /* fix: +1 for the NUL terminator; the old strlen()-sized
+                 * buffers let sscanf write one byte past the end */
+                if (VIR_ALLOC_N(host, strlen(pathDetails) + 1) < 0) {
+                    virReportOOMError();
+                    goto cleanup;
+                }
+                if (VIR_ALLOC_N(path, strlen(pathDetails) + 1) < 0) {
+                    virReportOOMError();
+                    goto cleanup;
+                }
+                host[0] = '\0';
+                path[0] = '\0';
+                if (cifs) {
+                    /* CIFS locations look like "//host/share" */
+                    pdef->source.format = VIR_STORAGE_POOL_NETFS_CIFS_ISO;
+                    sscanf(pathDetails, "//%[^/]%s", host, path);
+                } else {
+                    /* NFS locations look like "host:/export/path" */
+                    pdef->source.format = VIR_STORAGE_POOL_NETFS_NFS_ISO;
+                    sscanf(pathDetails, "%[^:]:%s", host, path);
+                }
+                if (host[0] != '\0') {
+                    if (!(pdef->source.host.name = strdup(host))) {
+                        virReportOOMError();
+                        goto cleanup;
+                    }
+                }
+                if (path[0] != '\0') {
+                    if (!(pdef->source.dir = strdup(path))) {
+                        virReportOOMError();
+                        goto cleanup;
+                    }
+                }
+            }
+        }
+    }
+    if (!(pdef->target.path = strdup("/"))) {
+        virReportOOMError();
+        goto cleanup;
+    }
+    xml = virStoragePoolDefFormat(pdef);
+    if (!xml)
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't convert to XML format");
+
+ cleanup:
+    VIR_FREE(host);
+    VIR_FREE(path);
+    VIR_FREE(pathDetails);
+    if (deviceConfig)
+        xen_string_string_map_free(deviceConfig);
+    if (pbd_set)
+        xen_pbd_set_free(pbd_set);
+    if (record)
+        xen_sr_record_free(record);
+    xen_sr_free(sr);   /* fix: the SR handle leaked on every path */
+    virStoragePoolDefFree(pdef);
+    return xml;
+}
+
+
+/*
+ * xenapiStoragePoolNumOfVolumes
+ *
+ * Fetch the number of storage volumes (VDIs) within a pool, looked up
+ * by the pool's name label (which must be unique).
+ * Returns the count on success, -1 on error.
+ *
+ * Fixes vs. the original: @pool was wrongly marked ATTRIBUTE_UNUSED
+ * although it is used, and sr_set leaked on the VDI-failure branch.
+ */
+static int
+xenapiStoragePoolNumOfVolumes (virStoragePoolPtr pool)
+{
+    xen_sr_set *sr_set = NULL;
+    xen_sr sr = NULL;
+    xen_vdi_set *vdi_set = NULL;
+    virConnectPtr conn = pool->conn;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+    int count = 0;
+
+    if (!xen_sr_get_by_name_label(session, &sr_set, pool->name) || sr_set->size == 0) {
+        if (sr_set) {
+            xen_sr_set_free(sr_set);
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        return -1;
+    }
+    if (sr_set->size != 1) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool Name is not unique");
+        xen_sr_set_free(sr_set);
+        return -1;
+    }
+    sr = sr_set->contents[0];
+    if (xen_sr_get_vdis(session, &vdi_set, sr) && vdi_set->size != 0) {
+        count = vdi_set->size;
+        xen_vdi_set_free(vdi_set);
+        xen_sr_set_free(sr_set);
+        return count;
+    }
+    if (vdi_set) {
+        xen_vdi_set_free(vdi_set);
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Volume not found");
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    xen_sr_set_free(sr_set);   /* fix: leaked here in the original */
+    return -1;
+}
+
+
+/*
+ * xenapiStoragePoolListVolumes
+ *
+ * Fill @names with up to @maxnames volume (VDI) name labels from
+ * @pool. Returns the number of names stored, or -1 on error (in which
+ * case any names already collected are freed). On success the caller
+ * owns the strings placed in @names.
+ *
+ * Fixes vs. the original: the error-path cleanup loop used
+ * `while(--count)`, which skipped names[0] and underflowed when the
+ * very first lookup failed; sr_set also leaked on the VDI-failure
+ * branch.
+ */
+static int
+xenapiStoragePoolListVolumes (virStoragePoolPtr pool, char ** const names,
+                              int maxnames)
+{
+    xen_sr_set *sr_set = NULL;
+    xen_sr sr = NULL;
+    xen_vdi_set *vdi_set = NULL;
+    xen_vdi vdi = NULL;
+    int count = 0, i;
+    char *usenames = NULL;
+    virConnectPtr conn = pool->conn;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (xen_sr_get_by_name_label(session, &sr_set, pool->name) && sr_set->size > 0) {
+        if (sr_set->size != 1) {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+            xen_sr_set_free(sr_set);
+            return -1;
+        }
+        sr = sr_set->contents[0];
+        if (xen_sr_get_vdis(session, &vdi_set, sr) && vdi_set->size > 0) {
+            for (i = 0; (i < vdi_set->size) && (count < maxnames); i++) {
+                vdi = vdi_set->contents[i];
+                if (xen_vdi_get_name_label(session, &usenames, vdi)) {
+                    names[count++] = usenames;
+                } else {
+                    xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+                    xen_vdi_set_free(vdi_set);
+                    xen_sr_set_free(sr_set);
+                    /* free every name collected so far, incl. names[0] */
+                    while (count > 0)
+                        VIR_FREE(names[--count]);
+                    return -1;
+                }
+            }
+            xen_vdi_set_free(vdi_set);
+        } else {
+            if (vdi_set) {
+                xen_vdi_set_free(vdi_set);
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Volume not found");
+            } else {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+            xen_sr_set_free(sr_set);   /* fix: leaked here in the original */
+            return -1;
+        }
+        xen_sr_set_free(sr_set);
+        return count;
+    } else {
+        if (sr_set) {
+            xen_sr_set_free(sr_set);
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+    }
+    return -1;
+}
+
+/*
+ * xenapiStoragePoolIsActive
+ *
+ * Determine whether the storage pool is currently running by checking
+ * whether its first PBD is attached.
+ * Returns 1 if running, 0 if not running, -1 on error.
+ *
+ * Fixes vs. the original: the HTML-mangled `¤tly_attached`
+ * token is restored to `&currently_attached`, and the flag is
+ * initialised in case the attachment query fails.
+ */
+static int
+xenapiStoragePoolIsActive(virStoragePoolPtr pool)
+{
+    xen_sr sr = NULL;
+    xen_pbd_set *pbd_set = NULL;
+    virConnectPtr conn = pool->conn;
+    char uuid[VIR_UUID_STRING_BUFLEN];
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    virUUIDFormat(pool->uuid, uuid);
+    if (xen_sr_get_by_uuid(session, &sr, uuid)) {
+        if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size > 0) {
+            bool currently_attached = false;  /* defined value on failure */
+            xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+            xen_pbd_set_free(pbd_set);
+            xen_sr_free(sr);
+            return currently_attached ? 1 : 0;
+        } else {
+            if (pbd_set) {
+                xen_pbd_set_free(pbd_set);
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Device not found");
+            } else {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+            xen_sr_free(sr);
+            return -1;
+        }
+    }
+    xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    return -1;
+}
+
+/*
+ * xenapiStoragePoolLookupByUUID
+ *
+ * Lookup a storage pool (SR) by its raw UUID.
+ * Returns a pool object on success, NULL on failure.
+ *
+ * Fix vs. the original: the SR handle leaked on the success path.
+ */
+static virStoragePoolPtr
+xenapiStoragePoolLookupByUUID (virConnectPtr conn,
+                               const unsigned char * uuid)
+{
+    xen_sr sr = NULL;
+    xen_sr_record *record = NULL;
+    char uuidStr[VIR_UUID_STRING_BUFLEN];
+    virStoragePoolPtr pool = NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    virUUIDFormat(uuid, uuidStr);
+    if (xen_sr_get_by_uuid(session, &sr, uuidStr) && sr) {
+        if (xen_sr_get_record(session, &record, sr)) {
+            pool = virGetStoragePool(conn, record->name_label, uuid);
+            if (!pool)
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get storage pool pointer");
+            xen_sr_record_free(record);
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        xen_sr_free(sr);   /* freed on every path, not just on failure */
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, NULL);
+    }
+    return pool;
+}
+
+
+/*
+ * xenapiStoragePoolGetInfo
+ *
+ * Fill @info with state and size information (bytes, as reported by
+ * XenAPI) for the given storage pool.
+ * Returns 0 on success, -1 on error.
+ *
+ * Fix vs. the original: the SR handle leaked on the success path.
+ */
+static int
+xenapiStoragePoolGetInfo (virStoragePoolPtr pool,
+                          virStoragePoolInfoPtr info)
+{
+    xen_sr_record *record = NULL;
+    xen_sr sr = NULL;
+    virConnectPtr conn = pool->conn;
+    int state = -1;
+    char uuid[VIR_UUID_STRING_BUFLEN];
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    virUUIDFormat(pool->uuid, uuid);
+    if (!(xen_sr_get_by_uuid(session, &sr, uuid) && sr)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        return -1;
+    }
+    if (!xen_sr_get_record(session, &record, sr)) {
+        xen_sr_free(sr);
+        xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, NULL);
+        return -1;
+    }
+    info->capacity = record->physical_size;
+    info->allocation = record->virtual_allocation;
+    info->available = record->physical_size - record->physical_utilisation;
+    state = xenapiStoragePoolIsActive(pool);
+    if (state == 1)
+        info->state = VIR_STORAGE_POOL_RUNNING;
+    else if (state == 0)
+        info->state = VIR_STORAGE_POOL_INACTIVE;
+    /* NOTE(review): info->state is left untouched when the activity
+     * check fails (state == -1) — confirm callers zero the struct. */
+    xen_sr_record_free(record);
+    xen_sr_free(sr);
+    return 0;
+}
+
+/*
+ * xenapiStoragePoolLookupByVolume
+ *
+ * Lookup the storage pool that contains the given volume, using the
+ * volume's pool name label (which must be unique).
+ * Returns a pool object on success, NULL on failure.
+ */
+static virStoragePoolPtr
+xenapiStoragePoolLookupByVolume (virStorageVolPtr vol)
+{
+ xen_sr_record *record=NULL;
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ virStoragePoolPtr poolPtr=NULL;
+ virConnectPtr conn = vol->conn;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (xen_sr_get_by_name_label(session, &sr_set, vol->pool) && sr_set->size>0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ sr = sr_set->contents[0];
+ xen_sr_get_record(session, &record, sr);
+ if (record!=NULL) {
+ /* The pool object is keyed by the SR's raw UUID. */
+ virUUIDParse(record->uuid,raw_uuid);
+ poolPtr = virGetStoragePool(conn,(const char *)record->name_label, raw_uuid);
+ if (poolPtr != NULL) {
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return poolPtr;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool pointer unavailable");
+ }
+ xen_sr_record_free(record);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return NULL;
+}
+
+/*
+ * xenapiStorageVolLookupByName
+ *
+ * Lookup a storage volume (VDI) within @pool by its name label, which
+ * must be unique. The returned volume object is keyed by the VDI UUID.
+ * Returns a volume object on success, NULL on failure.
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByName (virStoragePoolPtr pool,
+ const char *name)
+{
+ xen_vdi_set *vdi_set=NULL;
+ xen_vdi vdi=NULL;
+ virStorageVolPtr volPtr=NULL;
+ virConnectPtr conn = pool->conn;
+ char *uuid=NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (xen_vdi_get_by_name_label(session, &vdi_set, (char *)name) && vdi_set->size>0) {
+ if (vdi_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume name is not unique");
+ xen_vdi_set_free(vdi_set);
+ return NULL;
+ }
+ vdi = vdi_set->contents[0];
+ if (xen_vdi_get_uuid(session, &uuid, vdi)) {
+ /* virGetStorageVol copies its arguments, so uuid can be freed. */
+ volPtr = virGetStorageVol(conn, pool->name, name, uuid);
+ if (!volPtr) xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume pointer not available");
+ VIR_FREE(uuid);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't find the Unique key of the Storage Volume specified");
+ }
+ xen_vdi_set_free(vdi_set);
+ } else {
+ if (vdi_set) {
+ xen_vdi_set_free(vdi_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_VOL, "Storage Volume not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return volPtr;
+}
+
+/*
+ * xenapiStorageVolGetInfo
+ *
+ * Fill @info with type, capacity (virtual size) and allocation
+ * (physical utilisation) for the given storage volume (VDI).
+ * Returns 0 on success, -1 on error.
+ *
+ * Fixes vs. the original: the VDI and SR handles leaked on the
+ * success path, and commented-out dead code was removed.
+ */
+static int
+xenapiStorageVolGetInfo (virStorageVolPtr vol,
+                         virStorageVolInfoPtr info)
+{
+    virConnectPtr conn = vol->conn;
+    xen_vdi vdi = NULL;
+    xen_vdi_record *record = NULL;
+    xen_sr sr = NULL;
+    xen_sr_record *sr_record = NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+    int ret = -1;
+
+    if (!xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        return -1;
+    }
+    if (!xen_vdi_get_record(session, &record, vdi)) {
+        xen_vdi_free(vdi);
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        return -1;
+    }
+    info->capacity = record->virtual_size;
+    info->allocation = record->physical_utilisation;
+    /* The volume type is derived from the containing SR's type. */
+    if (xen_vdi_get_sr(session, &sr, vdi)) {
+        if (xen_sr_get_record(session, &sr_record, sr)) {
+            info->type = getStorageVolumeType(sr_record->type);
+            xen_sr_record_free(sr_record);
+            ret = 0;
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        xen_sr_free(sr);
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    xen_vdi_record_free(record);
+    xen_vdi_free(vdi);
+    return ret;
+}
+
+/* XenAPI storage pools are defined on the server and survive restarts,
+ * so persistence is unconditionally reported as true. */
+static int
+xenapiStoragePoolIsPersistent (virStoragePoolPtr pool ATTRIBUTE_UNUSED)
+{
+ return 1; /* Storage Pool is always persistent */
+}
+
+
+/*
+ * xenapiStorageVolGetXMLDesc
+ *
+ * Build the XML description of a storage volume (VDI), including its
+ * "/<sr-uuid>/<vdi-uuid>" target path and the parsed definition of the
+ * containing pool. Returns a heap-allocated XML string, NULL on error.
+ *
+ * Fixes vs. the original: capacity/allocation were swapped relative to
+ * xenapiStorageVolGetInfo (capacity is the virtual size); sr_uuid was
+ * passed to virUUIDParse without a NULL check; the VDI handle and the
+ * pool object leaked; "Could get" error-message typo corrected.
+ */
+static char *
+xenapiStorageVolGetXMLDesc (virStorageVolPtr vol, unsigned int flags ATTRIBUTE_UNUSED)
+{
+    virBuffer buf = VIR_BUFFER_INITIALIZER;
+    virConnectPtr conn = vol->conn;
+    xen_vdi vdi = NULL;
+    xen_sr sr = NULL;
+    xen_vdi_record *record = NULL;
+    char *sr_uuid = NULL, *srname = NULL, *xml = NULL, *poolXml = NULL;
+    unsigned char raw_uuid[VIR_UUID_BUFLEN];
+    virStorageVolDefPtr vdef = NULL;
+    virStoragePoolDefPtr pdef = NULL;
+    virStoragePoolPtr pool = NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (!xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        return NULL;
+    }
+    if (!xen_vdi_get_record(session, &record, vdi)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Volume information");
+        xen_vdi_free(vdi);
+        virBufferFreeAndReset(&buf);
+        return NULL;
+    }
+    if (VIR_ALLOC(vdef) < 0) {
+        virReportOOMError();
+        virBufferFreeAndReset(&buf);
+        xen_vdi_record_free(record);
+        xen_vdi_free(vdi);
+        return NULL;
+    }
+    if (!(vdef->name = strdup(record->name_label))) {
+        virReportOOMError();
+        virBufferFreeAndReset(&buf);
+        xen_vdi_record_free(record);
+        xen_vdi_free(vdi);
+        virStorageVolDefFree(vdef);
+        return NULL;
+    }
+    if (!(vdef->key = strdup(record->uuid))) {
+        virReportOOMError();
+        virBufferFreeAndReset(&buf);
+        xen_vdi_record_free(record);
+        xen_vdi_free(vdi);
+        virStorageVolDefFree(vdef);
+        return NULL;
+    }
+    /* fix: capacity is the virtual size, allocation the physical
+     * utilisation — matching xenapiStorageVolGetInfo. */
+    vdef->capacity = record->virtual_size;
+    vdef->allocation = record->physical_utilisation;
+
+    if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_uuid(session, &sr_uuid, sr)) {
+        virBufferVSprintf(&buf, "/%s/%s", sr_uuid, record->uuid);
+        vdef->target.path = virBufferContentAndReset(&buf);
+    }
+    if (sr)
+        xen_sr_get_name_label(session, &srname, sr);
+    if (sr)
+        xen_sr_free(sr);
+    xen_vdi_record_free(record);
+    xen_vdi_free(vdi);
+
+    /* fix: guard before virUUIDParse — sr_uuid/srname are NULL when the
+     * SR lookups above failed. */
+    if (!sr_uuid || !srname) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get SR details for the Storage Volume");
+        VIR_FREE(srname);
+        VIR_FREE(sr_uuid);
+        virStorageVolDefFree(vdef);
+        return NULL;
+    }
+    virUUIDParse(sr_uuid, raw_uuid);
+    if (!(pool = virGetStoragePool(conn, srname, raw_uuid))) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get storage pool pointer");
+        VIR_FREE(srname);
+        VIR_FREE(sr_uuid);
+        virStorageVolDefFree(vdef);
+        return NULL;
+    }
+    VIR_FREE(srname);
+    VIR_FREE(sr_uuid);
+    poolXml = xenapiStoragePoolGetXMLDesc(pool, 0);
+    virStoragePoolFree(pool);   /* fix: the pool object leaked */
+    if (!poolXml) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Storage Pool XML");
+        virStorageVolDefFree(vdef);
+        return NULL;
+    }
+    if (!(pdef = virStoragePoolDefParseString(poolXml))) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't parse Storage Pool XML");
+        VIR_FREE(poolXml);
+        virStorageVolDefFree(vdef);
+        return NULL;
+    }
+    VIR_FREE(poolXml);
+    if (!(xml = virStorageVolDefFormat(pdef, vdef)))
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't convert Storage Volume info to XML");
+    virStorageVolDefFree(vdef);
+    virStoragePoolDefFree(pdef);
+    return xml;
+}
+
+/*
+ * xenapiStorageVolLookupByPath
+ *
+ * Lookup a storage volume from a "/<sr-uuid>/<vdi-uuid>" path.
+ * Returns a volume object on success, NULL on failure.
+ *
+ * Fixes vs. the original: @path was wrongly marked ATTRIBUTE_UNUSED
+ * although it is parsed; the sscanf conversions are now bounded so an
+ * oversized path component cannot overflow the UUID buffers.
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByPath (virConnectPtr conn,
+                              const char * path)
+{
+    xen_sr sr = NULL;
+    xen_vdi vdi = NULL;
+    virStorageVolPtr volPtr = NULL;
+    char *srname = NULL, *vname = NULL;
+    char sruuid[VIR_UUID_STRING_BUFLEN] = "\0", vuuid[VIR_UUID_STRING_BUFLEN] = "\0";
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    /* %36[^/] bounds each field to the 36-char UUID + NUL buffer. */
+    if (sscanf(path, "/%36[^/]/%36[^/]", sruuid, vuuid) != 2) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Invalid path");
+        return NULL;
+    }
+    if (xen_sr_get_by_uuid(session, &sr, sruuid) && xen_sr_get_name_label(session, &srname, sr)) {
+        if (xen_vdi_get_by_uuid(session, &vdi, vuuid) && xen_vdi_get_name_label(session, &vname, vdi)) {
+            if (!(volPtr = virGetStorageVol(conn, srname, vname, vuuid)))
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume pointer not available");
+            VIR_FREE(vname);
+            xen_vdi_free(vdi);
+        } else {
+            if (vdi) xen_vdi_free(vdi);
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        VIR_FREE(srname);
+        xen_sr_free(sr);
+    } else {
+        if (sr) xen_sr_free(sr);
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return volPtr;
+}
+
+/*
+ * xenapiStorageVolGetPath
+ *
+ * Build the "/<sr-uuid>/<vdi-uuid>" path for the given volume.
+ * NOTE(review): on lookup failure the buffer stays empty and
+ * virBufferContentAndReset is returned anyway — confirm callers
+ * handle a NULL/empty result.
+ */
+static char *
+xenapiStorageVolGetPath (virStorageVolPtr vol)
+{
+ xen_vdi vdi=NULL;
+ virConnectPtr conn = vol->conn;
+ virBuffer path = VIR_BUFFER_INITIALIZER;
+ xen_sr sr=NULL;
+ char *sruuid=NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+ if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_uuid(session, &sruuid, sr)) {
+ virBufferVSprintf(&path,"/%s/%s",sruuid,vol->key);
+ VIR_FREE(sruuid);
+ xen_sr_free(sr);
+ } else {
+ if (sr) xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_vdi_free(vdi);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return virBufferContentAndReset(&path);
+}
+
+/* Refresh is a no-op stub that always reports success; SR state is
+ * queried from the server on demand, so there is no local cache to
+ * re-scan. */
+static int
+xenapiStoragePoolRefresh ( ATTRIBUTE_UNUSED virStoragePoolPtr pool,
+ ATTRIBUTE_UNUSED unsigned int flags)
+{
+ return 0;
+}
+
+/*
+ * xenapiStorageVolLookupByKey
+ *
+ * Lookup a storage volume by its key (the VDI UUID). The volume is
+ * named after the VDI and its containing SR name labels.
+ * Returns a volume object on success, NULL on failure.
+ *
+ * Fix vs. the original: the VDI and SR handles leaked on the
+ * success path.
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByKey (virConnectPtr conn, const char * key)
+{
+    xen_vdi vdi = NULL;
+    xen_sr sr = NULL;
+    xen_vdi_record *vrecord = NULL;
+    xen_sr_record *srecord = NULL;
+    virStorageVolPtr volPtr = NULL;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    if (xen_vdi_get_by_uuid(session, &vdi, (char *)key) && xen_vdi_get_record(session, &vrecord, vdi)) {
+        if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_record(session, &srecord, sr)) {
+            volPtr = virGetStorageVol(conn, srecord->name_label, vrecord->name_label, key);
+            if (!volPtr)
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume Pointer not available");
+            xen_sr_record_free(srecord);
+        } else {
+            xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        }
+        if (sr) xen_sr_free(sr);
+        xen_vdi_record_free(vrecord);
+        xen_vdi_free(vdi);
+    } else {
+        if (vdi) xen_vdi_free(vdi);
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return volPtr;
+}
+
+
+/*
+ * xenapiStoragePoolDestroy
+ *
+ * Deactivate a pool by unplugging every PBD connected to the SR.
+ * Returns 0 if at least one PBD was unplugged, -1 otherwise.
+ *
+ * Fix vs. the original: the loop unplugged contents[0] on every
+ * iteration instead of contents[i], so only the first PBD was ever
+ * unplugged.
+ */
+static int
+xenapiStoragePoolDestroy (virStoragePoolPtr pool)
+{
+    xen_sr sr = NULL;
+    xen_pbd pbd = NULL;
+    char uuidStr[VIR_UUID_STRING_BUFLEN];
+    struct xen_pbd_set *pbd_set = NULL;
+    int i, ret = -1;
+    virConnectPtr conn = pool->conn;
+    xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+    virUUIDFormat(pool->uuid, uuidStr);
+    if (xen_sr_get_by_uuid(session, &sr, uuidStr)) {
+        if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size > 0) {
+            for (i = 0; i < pbd_set->size; i++) {
+                pbd = pbd_set->contents[i];   /* fix: was contents[0] */
+                if (xen_pbd_unplug(session, pbd))
+                    ret = 0;
+                else
+                    xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+            xen_pbd_set_free(pbd_set);
+        } else {
+            if (pbd_set) {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "There are no PBDs in the specified pool to unplug");
+                xen_pbd_set_free(pbd_set);
+            } else {
+                xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+            }
+        }
+        xen_sr_free(sr);
+    } else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+    }
+    return ret;
+}
+
+/* Positional initializer for the virStorageDriver callback table;
+ * NULL slots are callbacks this driver does not implement (e.g. pool
+ * define/undefine, volume create/delete). Order must match the
+ * virStorageDriver struct declaration. */
+static virStorageDriver xenapiStorageDriver = {
+ "XenAPI Storage",
+ xenapiStorageOpen,
+ xenapiStorageClose,
+ xenapiNumOfStoragePools,
+ xenapiListStoragePools,
+ xenapiNumOfDefinedStoragePools,
+ xenapiListDefinedStoragePools,
+ NULL,
+ xenapiStoragePoolLookupByName,
+ xenapiStoragePoolLookupByUUID,
+ xenapiStoragePoolLookupByVolume,
+ xenapiStoragePoolCreateXML,
+ NULL,
+ xenapiStoragePoolBuild,
+ NULL,
+ xenapiStoragePoolCreate,
+ xenapiStoragePoolDestroy,
+ NULL,
+ xenapiStoragePoolRefresh,
+ xenapiStoragePoolGetInfo,
+ xenapiStoragePoolGetXMLDesc,
+ xenapiStoragePoolGetAutostart,
+ xenapiStoragePoolSetAutostart,
+ xenapiStoragePoolNumOfVolumes,
+ xenapiStoragePoolListVolumes,
+ xenapiStorageVolLookupByName,
+ xenapiStorageVolLookupByKey,
+ xenapiStorageVolLookupByPath,
+ NULL,
+ NULL,
+ NULL,
+ xenapiStorageVolGetInfo,
+ xenapiStorageVolGetXMLDesc,
+ xenapiStorageVolGetPath,
+ xenapiStoragePoolIsActive,
+ xenapiStoragePoolIsPersistent
+};
+
+
+/*
+ * xenapiStorageRegister
+ *
+ * Register the XenAPI storage driver callback table with libvirt.
+ * Returns the result of virRegisterStorageDriver (0 on success,
+ * -1 on failure).
+ */
+int
+xenapiStorageRegister (void)
+{
+ return virRegisterStorageDriver(&xenapiStorageDriver);
+}
+
+
+
+
--- ./libvirt_org/src/xenapi/xenapi_storage_driver.h 1970-01-01 01:00:00.000000000 +0100
+++ ./libvirt/src/xenapi/xenapi_storage_driver.h 2010-03-11 12:46:00.000000000 +0000
@@ -0,0 +1,42 @@
+/*
+ * xenapi_storage_driver.h: Xen API Storage Driver header file
+ * Copyright (C) 2009, 2010 Citrix Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Sharadha Prabhakar <sharadha.prabhakar(a)citrix.com>
+ */
+
+
+#ifndef __VIR_XENAPI_STORAGE_H__
+#define __VIR_XENAPI_STORAGE_H__
+
+#include <xen/api/xen_common.h>
+#include <libxml/tree.h>
+
+
+
+/* XenAPI storage driver's private data structure, stored in
+ * conn->storagePrivateData by the storage open callback. */
+struct _xenapiStoragePrivate {
+ xen_session *session; /* authenticated XenAPI session handle */
+ char *url; /* NOTE(review): presumably the server URL — confirm against xenapiStorageOpen */
+ int noVerify; /* NOTE(review): presumably disables TLS certificate checks — confirm */
+ virCapsPtr caps;
+};
+
+
+
+
+#endif /* __VIR_XENAPI_STORAGE_H__ */
--- ./src/xenapi/xenapi_utils.h_orig 2010-03-24 15:38:59.000000000 +0000
+++ ./src/xenapi/xenapi_utils.h 2010-03-23 10:44:38.000000000 +0000
@@ -56,8 +56,12 @@
#include "buf.h"
#define NETWORK_DEVID_SIZE (12)
+#define STORAGE_DEVID_SIZE (12)
typedef uint64_t cpumap_t;
+//newly added
+int
+createVbdStorage (virConnectPtr conn, xen_vm vm, int device, char *path, int devtype);
void
xenSessionFree(xen_session *session);
--- ./src/xenapi/xenapi_utils.c_orig 2010-03-24 15:32:28.000000000 +0000
+++ ./src/xenapi/xenapi_utils.c 2010-03-24 15:09:41.000000000 +0000
@@ -53,6 +53,7 @@
#include "xenapi_utils.h"
#include "util/logging.h"
#include "qparams.h"
+#include "xenapi_storage_driver.h"
void
xenSessionFree(xen_session *session)
@@ -390,17 +391,96 @@
const char *buf, const char *filename, const char *func, size_t lineno)
{
struct _xenapiPrivate *priv = conn->privateData;
-
- if (buf == NULL && priv != NULL && priv->session != NULL) {
- char *ret = returnErrorFromSession(priv->session);
- virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
- xen_session_clear_error(priv->session);
- VIR_FREE(ret);
+ struct _xenapiStoragePrivate *privS = conn->storagePrivateData;
+ char *ret = NULL;
+ if (buf == NULL) {
+ if (priv != NULL && priv->session != NULL) {
+ if (!priv->session->ok) {
+ ret = returnErrorFromSession(priv->session);
+ virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
+ xen_session_clear_error(priv->session);
+ VIR_FREE(ret);
+ }
+ }
+ if (privS != NULL && privS->session !=NULL) {
+ if (!privS->session->ok) {
+ ret = returnErrorFromSession(privS->session);
+ virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
+ xen_session_clear_error(privS->session);
+ VIR_FREE(ret);
+ }
+ }
} else {
virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), buf);
}
}
+/* create VBDs for VM */
+/*
+ * createVbdStorage
+ *
+ * Attach the storage volume identified by @path ("/<sr-uuid>/<vdi-uuid>")
+ * to @vm as virtual block device number @device. @devtype must be
+ * VIR_DOMAIN_DISK_DEVICE_DISK or VIR_DOMAIN_DISK_DEVICE_CDROM.
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fixes vs. the original: removed leftover debug fprintf to stderr;
+ * bounded sscanf/snprintf so path components and the device number
+ * cannot overflow their buffers; the created VBD handle is freed.
+ */
+int
+createVbdStorage (virConnectPtr conn, xen_vm vm, int device, char *path, int devtype)
+{
+    xen_vm xvm = NULL;
+    xen_vdi vdi = NULL;
+    xen_vbd vbd = NULL;
+    char *vmuuid = NULL;
+    char userdevice[STORAGE_DEVID_SIZE] = "\0";
+    xen_vbd_record *record = NULL;
+    xen_session *session = ((struct _xenapiPrivate *)(conn->privateData))->session;
+    char sruuid[VIR_UUID_STRING_BUFLEN] = "\0", voluuid[VIR_UUID_STRING_BUFLEN] = "\0";
+
+    /* %36[^/] bounds each field to the 36-char UUID + NUL buffer. */
+    if (sscanf(path, "/%36[^/]/%36[^/]", sruuid, voluuid) != 2)
+        return -1;
+    if (!xen_vm_get_uuid(session, &vmuuid, vm))
+        return -1;
+    if (!xen_vm_get_by_uuid(session, &xvm, vmuuid)) {
+        VIR_FREE(vmuuid);
+        return -1;
+    }
+    VIR_FREE(vmuuid);
+    if (!xen_vdi_get_by_uuid(session, &vdi, voluuid)) {
+        xen_vm_free(xvm);
+        return -1;
+    }
+    /* snprintf instead of sprintf: an oversized device number cannot
+     * overflow the fixed-size buffer. */
+    snprintf(userdevice, sizeof(userdevice), "%d", device);
+
+    xen_vm_record_opt *vm_opt = xen_vm_record_opt_alloc();
+    vm_opt->is_record = 0;
+    vm_opt->u.handle = xvm;
+
+    xen_vdi_record_opt *vdi_opt = xen_vdi_record_opt_alloc();
+    vdi_opt->is_record = 0;
+    vdi_opt->u.handle = vdi;
+
+    record = xen_vbd_record_alloc();
+    record->vm = vm_opt;    /* record now owns xvm via vm_opt */
+    record->vdi = vdi_opt;  /* record now owns vdi via vdi_opt */
+    if (!(record->userdevice = strdup(userdevice))) {
+        xen_vbd_record_free(record);
+        return -1;
+    }
+    record->other_config = xen_string_string_map_alloc(0);
+    record->runtime_properties = xen_string_string_map_alloc(0);
+    record->qos_algorithm_params = xen_string_string_map_alloc(0);
+    if (devtype == VIR_DOMAIN_DISK_DEVICE_DISK)
+        record->type = XEN_VBD_TYPE_DISK;
+    else if (devtype == VIR_DOMAIN_DISK_DEVICE_CDROM)
+        record->type = XEN_VBD_TYPE_CD;
+    else {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Only CDROM and HardDisk supported");
+        xen_vbd_record_free(record);
+        return -1;
+    }
+    if (!xen_vbd_create(session, &vbd, record)) {
+        xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+        xen_vbd_record_free(record);
+        return -1;
+    }
+    xen_vbd_free(vbd);   /* the handle is not needed after creation */
+    xen_vbd_record_free(record);
+    return 0;
+}
+
+
+
/* creates network intereface for VM */
int
createVifNetwork (virConnectPtr conn, xen_vm vm, char *device,
@@ -557,6 +637,7 @@
int device_number=0;
char *bridge=NULL,*mac=NULL;
int i;
+ //support for network interfaces
for (i=0;i<def->nnets;i++) {
if (def->nets[i]->type == VIR_DOMAIN_NET_TYPE_BRIDGE) {
if (def->nets[i]->data.bridge.brname)
@@ -580,6 +661,13 @@
if (bridge) VIR_FREE(bridge);
}
}
+ //support for disks here
+ for (i=0;i<def->ndisks;i++) {
+ if (createVbdStorage(conn, *vm, i, def->disks[i]->src, def->disks[i]->device)!= 0) {
+ xen_vm_record_free(*record);
+ return -1;
+ }
+ }
return 0;
error_cleanup:
--- ../libvirt_org/src/conf/storage_conf.c 2010-02-17 17:38:05.000000000 +0000
+++ ./src/conf/storage_conf.c 2010-03-22 15:08:36.000000000 +0000
@@ -61,7 +61,7 @@
VIR_ENUM_IMPL(virStoragePoolFormatFileSystemNet,
VIR_STORAGE_POOL_NETFS_LAST,
- "auto", "nfs", "glusterfs")
+ "auto", "nfs", "nfs-iso", "cifs-iso", "glusterfs")
VIR_ENUM_IMPL(virStoragePoolFormatDisk,
VIR_STORAGE_POOL_DISK_LAST,
--- ../libvirt_org/src/conf/storage_conf.h 2010-02-17 17:38:06.000000000 +0000
+++ ./src/conf/storage_conf.h 2010-03-22 14:01:02.000000000 +0000
@@ -404,6 +404,8 @@
enum virStoragePoolFormatFileSystemNet {
VIR_STORAGE_POOL_NETFS_AUTO = 0,
VIR_STORAGE_POOL_NETFS_NFS,
+ VIR_STORAGE_POOL_NETFS_NFS_ISO,
+ VIR_STORAGE_POOL_NETFS_CIFS_ISO,
VIR_STORAGE_POOL_NETFS_GLUSTERFS,
VIR_STORAGE_POOL_NETFS_LAST,
};
--- ./src/Makefile.am_04mar 2010-03-05 10:55:04.000000000 +0000
+++ ./src/Makefile.am 2010-03-23 18:11:50.000000000 +0000
@@ -210,7 +211,9 @@
XENAPI_DRIVER_SOURCES = \
xenapi/xenapi_driver.c xenapi/xenapi_driver.h \
xenapi_driver_private.h \
- xenapi/xenapi_utils.c xenapi/xenapi_utils.h
+ xenapi/xenapi_utils.c xenapi/xenapi_utils.h \
+ xenapi/xenapi_storage_driver.c \
+ xenapi/xenapi_storage_driver.h
UML_DRIVER_SOURCES = \
uml/uml_conf.c uml/uml_conf.h \
--- ../libvirt_org/src/libvirt.c 2010-02-17 17:38:08.000000000 +0000
+++ ./src/libvirt.c 2010-03-11 12:14:33.000000000 +0000
@@ -377,6 +381,10 @@
#ifdef WITH_ESX
if (esxRegister() == -1) return -1;
#endif
#ifdef WITH_XENAPI
if (xenapiRegister () == -1) return -1;
+ if (xenapiStorageRegister () == -1) return -1;
#endif
#ifdef WITH_REMOTE
if (remoteRegister () == -1) return -1;
#endif
--- ./src/xenapi/xenapi_driver.h_orig 2010-03-23 19:00:14.000000000 +0000
+++ ./src/xenapi/xenapi_driver.h 2010-03-11 11:11:01.000000000 +0000
@@ -25,5 +25,6 @@
extern int xenapiRegister (void);
+extern int xenapiStorageRegister (void);
#endif /* __VIR_XENAPI_PRIV_H__ */
1
1
26 Mar '10
Hello,
While using multiple disk devices with different bus types (ide and virtio)
I noticed that the order of disks is handled in a special way.
The disk device which was defined as the first item will not be the first
entry in the system xml file.
e.g. if I define a virtio device (which is supposed to be the boot device)
this would be added after all the ide devices.
So it's not possible to boot from a virtio device even if I have defined
other ide devices since qemu flags the first device as boot device.
Is there a special reason why the disks are sorted by type (first all ide
and then virtio)? This ordering prevents booting from virtio devices.
Thanks.
Mit freundlichen Grüßen / Kind regards
Ingo Tuchscherer
1
0
No big deal, but I saw recent additions of "test ... -a ..."
(not portable) so fixed the rest, too.
Now, searching for violations shows none:
git grep '\<test .* -a '
Whether it's possible to rely on test -a in test scripts is debatable:
perhaps you've ensured that the SHELL you use when running tests is
POSIX compliant or better (I do that in coreutils), but at least in
configure.ac, we should toe the line wrt portability (because *it*
has less choice), so those are in a separate commit.
Since this is a global change, it deserves a syntax-check rule.
That's the 3/3 patch, below.
1/3 fixes test-lib.sh
2/3 fixes configure.ac
>From ca7db6cb8000cc283fcee7899140d2fc892b0296 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Wed, 24 Mar 2010 09:05:27 +0100
Subject: [PATCH 1/3] tests: shell script portability and clean-up
* tests/test-lib.sh: "echo -n" is not portable. Use printf instead.
Remove unnecessary uses of "eval-in-subshell" (subshell is sufficient).
Remove uses of tests' -a operator; it is not portable.
Instead, use "test cond && test cond2".
* tests/schematestutils.sh: Replace use of test's -a.
---
tests/schematestutils.sh | 2 +-
tests/test-lib.sh | 20 ++++++++++----------
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/tests/schematestutils.sh b/tests/schematestutils.sh
index 301b9eb..f172857 100644
--- a/tests/schematestutils.sh
+++ b/tests/schematestutils.sh
@@ -21,7 +21,7 @@ do
ret=$?
test_result $n $(basename $(dirname $xml))"/"$(basename $xml) $ret
- if test "$verbose" = "1" -a $ret != 0 ; then
+ if test "$verbose" = "1" && test $ret != 0 ; then
echo -e "$cmd\n$result"
fi
if test "$ret" != 0 ; then
diff --git a/tests/test-lib.sh b/tests/test-lib.sh
index 57fd438..28b830e 100644
--- a/tests/test-lib.sh
+++ b/tests/test-lib.sh
@@ -19,7 +19,7 @@ test_intro()
name=$1
if test "$verbose" = "0" ; then
echo "TEST: $name"
- echo -n " "
+ printf " "
fi
}
@@ -29,15 +29,15 @@ test_result()
name=$2
status=$3
if test "$verbose" = "0" ; then
- mod=`eval "expr \( $counter - 1 \) % 40"`
- if test "$counter" != 1 -a "$mod" = 0 ; then
- printf " %-3d\n" `eval "expr $counter - 1"`
- echo -n " "
+ mod=`expr \( $counter + 40 - 1 \) % 40`
+ if test "$counter" != 1 && test "$mod" = 0 ; then
+ printf " %-3d\n" `expr $counter - 1`
+ printf " "
fi
if test "$status" = "0" ; then
- echo -n "."
+ printf "."
else
- echo -n "!"
+ printf "!"
fi
else
if test "$status" = "0" ; then
@@ -54,11 +54,11 @@ test_final()
status=$2
if test "$verbose" = "0" ; then
- mod=`eval "expr \( $counter + 1 \) % 40"`
- if test "$mod" != "0" -a "$mod" != "1" ; then
+ mod=`expr \( $counter + 1 \) % 40`
+ if test "$mod" != "0" && test "$mod" != "1" ; then
for i in `seq $mod 40`
do
- echo -n " "
+ printf " "
done
fi
if test "$status" = "0" ; then
--
1.7.0.3.435.g097f4
>From 7998714d60b997357bfea15d6f2d0f729fc8fb29 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Wed, 24 Mar 2010 09:10:13 +0100
Subject: [PATCH 2/3] build: don't use "test cond1 -a cond2" in configure: it's not portable
* configure.ac: Use "test cond1 && test cond2" instead.
---
configure.ac | 26 +++++++++++++-------------
1 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/configure.ac b/configure.ac
index bcf1d5a..2e6d2e4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -197,10 +197,10 @@ dnl if --prefix is /usr, don't use /usr/var for localstatedir
dnl or /usr/etc for sysconfdir
dnl as this makes a lot of things break in testing situations
-if test "$prefix" = "/usr" -a "$localstatedir" = '${prefix}/var' ; then
+if test "$prefix" = "/usr" && test "$localstatedir" = '${prefix}/var' ; then
localstatedir='/var'
fi
-if test "$prefix" = "/usr" -a "$sysconfdir" = '${prefix}/etc' ; then
+if test "$prefix" = "/usr" && test "$sysconfdir" = '${prefix}/etc' ; then
sysconfdir='/etc'
fi
@@ -240,7 +240,7 @@ AC_ARG_WITH([libvirtd],
dnl
dnl specific tests to setup DV devel environments with debug etc ...
dnl
-if [[ "${LOGNAME}" = "veillard" -a "`pwd`" = "/u/veillard/libvirt" ]] ; then
+if [[ "${LOGNAME}" = "veillard" && test "`pwd`" = "/u/veillard/libvirt" ]] ; then
STATIC_BINARIES="-static"
else
STATIC_BINARIES=
@@ -351,7 +351,7 @@ LIBXENSERVER_LIBS=""
LIBXENSERVER_CFLAGS=""
dnl search for the XenServer library
if test "$with_xenapi" != "no" ; then
- if test "$with_xenapi" != "yes" -a "$with_xenapi" != "check" ; then
+ if test "$with_xenapi" != "yes" && test "$with_xenapi" != "check" ; then
LIBXENSERVER_CFLAGS="-I$with_xenapi/include"
LIBXENSERVER_LIBS="-L$with_xenapi"
fi
@@ -390,7 +390,7 @@ XEN_LIBS=""
XEN_CFLAGS=""
dnl search for the Xen store library
if test "$with_xen" != "no" ; then
- if test "$with_xen" != "yes" -a "$with_xen" != "check" ; then
+ if test "$with_xen" != "yes" && test "$with_xen" != "check" ; then
XEN_CFLAGS="-I$with_xen/include"
XEN_LIBS="-L$with_xen/lib64 -L$with_xen/lib"
fi
@@ -571,7 +571,7 @@ AC_ARG_WITH([libxml], AC_HELP_STRING([--with-libxml=@<:@PFX@:>@], [libxml2 locat
if test "x$with_libxml" = "xno" ; then
AC_MSG_CHECKING(for libxml2 libraries >= $LIBXML_REQUIRED)
AC_MSG_ERROR([libxml2 >= $LIBXML_REQUIRED is required for libvirt])
-elif test "x$with_libxml" = "x" -a "x$PKG_CONFIG" != "x" ; then
+elif test "x$with_libxml" = "x" && test "x$PKG_CONFIG" != "x" ; then
PKG_CHECK_MODULES(LIBXML, libxml-2.0 >= $LIBXML_REQUIRED, [LIBXML_FOUND=yes], [LIBXML_FOUND=no])
fi
if test "$LIBXML_FOUND" = "no" ; then
@@ -661,7 +661,7 @@ AC_ARG_WITH([sasl],
SASL_CFLAGS=
SASL_LIBS=
if test "x$with_sasl" != "xno"; then
- if test "x$with_sasl" != "xyes" -a "x$with_sasl" != "xcheck"; then
+ if test "x$with_sasl" != "xyes" && test "x$with_sasl" != "xcheck"; then
SASL_CFLAGS="-I$with_sasl"
SASL_LIBS="-L$with_sasl"
fi
@@ -716,7 +716,7 @@ AC_ARG_WITH([yajl],
YAJL_CFLAGS=
YAJL_LIBS=
if test "x$with_yajl" != "xno"; then
- if test "x$with_yajl" != "xyes" -a "x$with_yajl" != "xcheck"; then
+ if test "x$with_yajl" != "xyes" && test "x$with_yajl" != "xcheck"; then
YAJL_CFLAGS="-I$with_yajl/include"
YAJL_LIBS="-L$with_yajl/lib"
fi
@@ -1004,7 +1004,7 @@ AC_ARG_WITH([numactl],
NUMACTL_CFLAGS=
NUMACTL_LIBS=
-if test "$with_qemu" = "yes" -a "$with_numactl" != "no"; then
+if test "$with_qemu" = "yes" && test "$with_numactl" != "no"; then
old_cflags="$CFLAGS"
old_libs="$LIBS"
if test "$with_numactl" = "check"; then
@@ -1062,7 +1062,7 @@ dnl
dnl libssh checks
dnl
-if test "$with_libssh2" != "yes" -a "$with_libssh2" != "no"; then
+if test "$with_libssh2" != "yes" && test "$with_libssh2" != "no"; then
libssh2_path="$with_libssh2"
elif test "$with_libssh2" = "yes"; then
libssh2_path="/usr/local/lib/"
@@ -1143,7 +1143,7 @@ dnl introduced in 0.4.0 release which need as minimum
dnl
CAPNG_CFLAGS=
CAPNG_LIBS=
-if test "$with_qemu" = "yes" -a "$with_capng" != "no"; then
+if test "$with_qemu" = "yes" && test "$with_capng" != "no"; then
old_cflags="$CFLAGS"
old_libs="$LIBS"
if test "$with_capng" = "check"; then
@@ -1453,7 +1453,7 @@ if test "$with_storage_disk" = "yes" -o "$with_storage_disk" = "check"; then
PARTED_FOUND=yes
fi
- if test "$with_storage_disk" != "no" -a "x$PKG_CONFIG" != "x" ; then
+ if test "$with_storage_disk" != "no" && test "x$PKG_CONFIG" != "x" ; then
PKG_CHECK_MODULES(LIBPARTED, libparted >= $PARTED_REQUIRED, [], [PARTED_FOUND=no])
fi
if test "$PARTED_FOUND" = "no"; then
@@ -1635,7 +1635,7 @@ else
fi
AC_MSG_RESULT($RUNNING_XEND)
-AM_CONDITIONAL([ENABLE_XEN_TESTS], [test "$RUNNING_XEN" != "no" -a "$RUNNING_XEND" != "no"])
+AM_CONDITIONAL([ENABLE_XEN_TESTS], [test "$RUNNING_XEN" != "no" && test "$RUNNING_XEND" != "no"])
AC_ARG_ENABLE([test-coverage],
AC_HELP_STRING([--enable-test-coverage], [turn on code coverage instrumentation @<:@default=no@:>@]),
--
1.7.0.3.435.g097f4
>From 95c8ddd2eca90e3024a6f74af84517c1e0115a60 Mon Sep 17 00:00:00 2001
From: Jim Meyering <meyering(a)redhat.com>
Date: Wed, 24 Mar 2010 09:32:43 +0100
Subject: [PATCH 3/3] maint: add syntax-check rule to prohibit use of test's -a operator
* cfg.mk (sc_prohibit_test_minus_a): New rule.
---
cfg.mk | 6 ++++++
1 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/cfg.mk b/cfg.mk
index 2d0d278..4302338 100644
--- a/cfg.mk
+++ b/cfg.mk
@@ -269,6 +269,12 @@ sc_preprocessor_indentation:
echo '$(ME): skipping test $@: cppi not installed' 1>&2; \
fi
+# Using test's -a operator is not portable.
+sc_prohibit_test_minus_a:
+ @re='\<test .+ -[a] ' \
+ msg='use "test C1 && test C2, not "test C1 -''a C2"' \
+ $(_prohibit_regexp)
+
sc_copyright_format:
@$(VC_LIST_EXCEPT) | xargs grep -ni 'copyright .*Red 'Hat \
| grep -v Inc \
--
1.7.0.3.435.g097f4
2
7
Hi,
I've mentioned this to a few folks already but I wanted to start a
proper thread.
We're struggling in qemu with usability and one area that concerns me is
the disparity in features that are supported by qemu vs what's
implemented in libvirt.
This isn't necessarily libvirt's problem if its mission is to provide a
common hypervisor API that covers the most commonly used features.
However, for qemu, we need an API that covers all of our features that
people can develop against. The ultimate question we need to figure out
is, should we encourage our users to always use libvirt or should we
build our own API for people (and libvirt) to consume.
I don't think it's necessarily a big technical challenge for libvirt to
support qemu more completely. I think it amounts to introducing a
series of virQemuXXXX APIs that implement qemu specific functions. Over
time, qemu specific APIs can be deprecated in favour of more generic
virDomain APIs.
What's the feeling about this from the libvirt side of things? Is there
interest in supporting hypervisor-specific interfaces, or should we be
looking to provide our own management interface for libvirt to consume?
Regards,
Anthony Liguori
12
34
[libvirt] [PATCH] esx: Make the conf parser compare names case insensitive in VMX mode
by Matthias Bolte 26 Mar '10
by Matthias Bolte 26 Mar '10
26 Mar '10
The keys of entries in a VMX file are case insensitive. Both scsi0:1.fileName
and scsi0:1.filename are valid. Therefore, make the conf parser compare names
case insensitive in VMX mode to accept every capitalization variation.
Also add test cases for this.
---
src/util/conf.c | 5 ++-
tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx | 51 ++++++++++++++++++++++
tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml | 25 +++++++++++
tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx | 51 ++++++++++++++++++++++
tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml | 25 +++++++++++
tests/vmx2xmltest.c | 9 ++++
6 files changed, 165 insertions(+), 1 deletions(-)
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx
create mode 100644 tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml
diff --git a/src/util/conf.c b/src/util/conf.c
index 24588c2..ae0459e 100644
--- a/src/util/conf.c
+++ b/src/util/conf.c
@@ -831,7 +831,10 @@ virConfGetValue(virConfPtr conf, const char *setting)
cur = conf->entries;
while (cur != NULL) {
- if ((cur->name != NULL) && (STREQ(cur->name, setting)))
+ if ((cur->name != NULL) &&
+ ((conf->flags & VIR_CONF_FLAG_VMX_FORMAT &&
+ STRCASEEQ(cur->name, setting)) ||
+ STREQ(cur->name, setting)))
return(cur->value);
cur = cur->next;
}
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx
new file mode 100644
index 0000000..3626c5e
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.vmx
@@ -0,0 +1,51 @@
+CONFIG.VERSION = "8"
+VIRTUALHW.VERSION = "4"
+FLOPPY0.PRESENT = "FALSE"
+NVRAM = "FEDORA11.NVRAM"
+DEPLOYMENTPLATFORM = "WINDOWS"
+VIRTUALHW.PRODUCTCOMPATIBILITY = "HOSTED"
+TOOLS.UPGRADE.POLICY = "USEGLOBAL"
+POWERTYPE.POWEROFF = "DEFAULT"
+POWERTYPE.POWERON = "DEFAULT"
+POWERTYPE.SUSPEND = "DEFAULT"
+POWERTYPE.RESET = "DEFAULT"
+
+DISPLAYNAME = "FEDORA11"
+EXTENDEDCONFIGFILE = "FEDORA11.VMXF"
+
+SCSI0.PRESENT = "TRUE"
+SCSI0.SHAREDBUS = "NONE"
+SCSI0.VIRTUALDEV = "LSILOGIC"
+MEMSIZE = "1024"
+SCSI0:0.PRESENT = "TRUE"
+SCSI0:0.FILENAME = "FEDORA11.vmdk"
+SCSI0:0.DEVICETYPE = "SCSI-HARDDISK"
+IDE0:0.PRESENT = "TRUE"
+IDE0:0.CLIENTDEVICE = "TRUE"
+IDE0:0.DEVICETYPE = "CDROM-RAW"
+IDE0:0.STARTCONNECTED = "FALSE"
+ETHERNET0.PRESENT = "TRUE"
+ETHERNET0.NETWORKNAME = "VM NETWORK"
+ETHERNET0.ADDRESSTYPE = "VPX"
+ETHERNET0.GENERATEDADDRESS = "00:50:56:91:48:C7"
+CHIPSET.ONLINESTANDBY = "FALSE"
+GUESTOSALTNAME = "RED HAT ENTERPRISE LINUX 5 (32-BIT)"
+GUESTOS = "RHEL5"
+UUID.BIOS = "50 11 5E 16 9B DC 49 D7-F1 71 53 C4 D7 F9 17 10"
+SNAPSHOT.ACTION = "KEEP"
+SCHED.CPU.MIN = "0"
+SCHED.CPU.UNITS = "MHZ"
+SCHED.CPU.SHARES = "NORMAL"
+SCHED.MEM.MINSIZE = "0"
+SCHED.MEM.SHARES = "NORMAL"
+TOOLSCRIPTS.AFTERPOWERON = "TRUE"
+TOOLSCRIPTS.AFTERRESUME = "TRUE"
+TOOLSCRIPTS.BEFORESUSPEND = "TRUE"
+TOOLSCRIPTS.BEFOREPOWEROFF = "TRUE"
+
+SCSI0:0.REDO = ""
+TOOLS.SYNCTIME = "FALSE"
+UUID.LOCATION = "56 4D B5 06 A2 BD FB EB-AE 86 F7 D8 49 27 D0 C4"
+SCHED.CPU.MAX = "UNLIMITED"
+SCHED.SWAP.DERIVEDNAME = "/VMFS/VOLUMES/498076B2-02796C1A-EF5B-000AE484A6A3/FEDORA11/FEDORA11-7DE040D8.VSWP"
+TOOLS.REMINDINSTALL = "TRUE"
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml
new file mode 100644
index 0000000..0be570f
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-1.xml
@@ -0,0 +1,25 @@
+<domain type='vmware'>
+ <name>FEDORA11</name>
+ <uuid>50115e16-9bdc-49d7-f171-53c4d7f91710</uuid>
+ <memory>1048576</memory>
+ <currentMemory>1048576</currentMemory>
+ <vcpu>1</vcpu>
+ <os>
+ <type arch='i686'>hvm</type>
+ </os>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='LSILOGIC'/>
+ <source file='[datastore] directory/FEDORA11.vmdk'/>
+ <target dev='sda' bus='scsi'/>
+ </disk>
+ <interface type='bridge'>
+ <mac address='00:50:56:91:48:c7'/>
+ <source bridge='VM NETWORK'/>
+ </interface>
+ </devices>
+</domain>
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx
new file mode 100644
index 0000000..a485d03
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.vmx
@@ -0,0 +1,51 @@
+config.version = "8"
+virtualhw.version = "4"
+floppy0.present = "false"
+nvram = "fedora11.nvram"
+deploymentplatform = "windows"
+virtualhw.productcompatibility = "hosted"
+tools.upgrade.policy = "useglobal"
+powertype.poweroff = "default"
+powertype.poweron = "default"
+powertype.suspend = "default"
+powertype.reset = "default"
+
+displayname = "fedora11"
+extendedconfigfile = "fedora11.vmxf"
+
+scsi0.present = "true"
+scsi0.sharedbus = "none"
+scsi0.virtualdev = "lsilogic"
+memsize = "1024"
+scsi0:0.present = "true"
+scsi0:0.filename = "fedora11.vmdk"
+scsi0:0.devicetype = "scsi-harddisk"
+ide0:0.present = "true"
+ide0:0.clientdevice = "true"
+ide0:0.devicetype = "cdrom-raw"
+ide0:0.startconnected = "false"
+ethernet0.present = "true"
+ethernet0.networkname = "vm network"
+ethernet0.addresstype = "vpx"
+ethernet0.generatedaddress = "00:50:56:91:48:c7"
+chipset.onlinestandby = "false"
+guestosaltname = "red hat enterprise linux 5 (32-bit)"
+guestos = "rhel5"
+uuid.bios = "50 11 5e 16 9b dc 49 d7-f1 71 53 c4 d7 f9 17 10"
+snapshot.action = "keep"
+sched.cpu.min = "0"
+sched.cpu.units = "mhz"
+sched.cpu.shares = "normal"
+sched.mem.minsize = "0"
+sched.mem.shares = "normal"
+toolscripts.afterpoweron = "true"
+toolscripts.afterresume = "true"
+toolscripts.beforesuspend = "true"
+toolscripts.beforepoweroff = "true"
+
+scsi0:0.redo = ""
+tools.synctime = "false"
+uuid.location = "56 4d b5 06 a2 bd fb eb-ae 86 f7 d8 49 27 d0 c4"
+sched.cpu.max = "unlimited"
+sched.swap.derivedname = "/vmfs/volumes/498076b2-02796c1a-ef5b-000ae484a6a3/fedora11/fedora11-7de040d8.vswp"
+tools.remindinstall = "true"
diff --git a/tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml
new file mode 100644
index 0000000..766172f
--- /dev/null
+++ b/tests/vmx2xmldata/vmx2xml-case-insensitive-2.xml
@@ -0,0 +1,25 @@
+<domain type='vmware'>
+ <name>fedora11</name>
+ <uuid>50115e16-9bdc-49d7-f171-53c4d7f91710</uuid>
+ <memory>1048576</memory>
+ <currentMemory>1048576</currentMemory>
+ <vcpu>1</vcpu>
+ <os>
+ <type arch='i686'>hvm</type>
+ </os>
+ <clock offset='utc'/>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <disk type='file' device='disk'>
+ <driver name='lsilogic'/>
+ <source file='[datastore] directory/fedora11.vmdk'/>
+ <target dev='sda' bus='scsi'/>
+ </disk>
+ <interface type='bridge'>
+ <mac address='00:50:56:91:48:c7'/>
+ <source bridge='vm network'/>
+ </interface>
+ </devices>
+</domain>
diff --git a/tests/vmx2xmltest.c b/tests/vmx2xmltest.c
index b4eb5d5..4c93059 100644
--- a/tests/vmx2xmltest.c
+++ b/tests/vmx2xmltest.c
@@ -26,6 +26,7 @@ testCompareFiles(const char *vmx, const char *xml, esxVI_APIVersion apiVersion)
char *vmxPtr = &(vmxData[0]);
char *xmlPtr = &(xmlData[0]);
virDomainDefPtr def = NULL;
+ virErrorPtr err = NULL;
if (virtTestLoadFile(vmx, &vmxPtr, MAX_FILE) < 0) {
goto failure;
@@ -39,12 +40,16 @@ testCompareFiles(const char *vmx, const char *xml, esxVI_APIVersion apiVersion)
apiVersion);
if (def == NULL) {
+ err = virGetLastError();
+ fprintf(stderr, "ERROR: %s\n", err != NULL ? err->message : "<unknown>");
goto failure;
}
formatted = virDomainDefFormat(def, VIR_DOMAIN_XML_SECURE);
if (formatted == NULL) {
+ err = virGetLastError();
+ fprintf(stderr, "ERROR: %s\n", err != NULL ? err->message : "<unknown>");
goto failure;
}
@@ -117,6 +122,10 @@ mymain(int argc, char **argv)
} \
} while (0)
+
+ DO_TEST("case-insensitive-1", "case-insensitive-1", esxVI_APIVersion_25);
+ DO_TEST("case-insensitive-2", "case-insensitive-2", esxVI_APIVersion_25);
+
DO_TEST("minimal", "minimal", esxVI_APIVersion_25);
DO_TEST("minimal-64bit", "minimal-64bit", esxVI_APIVersion_25);
--
1.6.3.3
3
3