[libvirt] [PATCH 0/3] Check CPU features during migration
by Jiri Denemark
Jiri Denemark (3):
cpuUpdate() for updating guest CPU according to host CPU
Helper function for making a deep-copy of virCPUDefPtr
Introduce UPDATE_CPU flag for virDomainGetXMLDesc
include/libvirt/libvirt.h.in | 5 +-
src/conf/cpu_conf.c | 37 ++++++++++
src/conf/cpu_conf.h | 3 +
src/cpu/cpu.c | 22 ++++++
src/cpu/cpu.h | 9 +++
src/cpu/cpu_generic.c | 1 +
src/cpu/cpu_x86.c | 161 +++++++++++++++++++++++++++++++++++++-----
src/libvirt.c | 3 +-
src/libvirt_private.syms | 2 +
src/qemu/qemu_driver.c | 54 ++++++++++++--
tools/virsh.c | 4 +
11 files changed, 273 insertions(+), 28 deletions(-)
[libvirt] [PATCH] Don't replace persistent domain config with migrated config
by Jiri Denemark
When a domain is defined on host1, migrated to host2, and then migrated
back to host1, its current configuration would overwrite libvirtd's
in-memory copy of that domain's persistent configuration. This is not
desired, as we want to keep the persistent configuration untouched.
This patch introduces a new 'live' parameter to virDomainAssignDef.
Passing 'true' for 'live' means the configuration passed to
virDomainAssignDef describes the configuration of a live instance of the
domain. This applies to saved domains being restored and to incoming
domains during migration.
All callers have been changed to pass the appropriate value.
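For reference, the changed call sites below boil down to this pattern (a
condensed sketch of the existing callers, not a new API):

    /* defining a new persistent configuration: not a live instance */
    vm = virDomainAssignDef(driver->caps, &driver->domains, def, false);

    /* restoring a saved domain or preparing an incoming migration:
     * def describes a live instance, so the previous persistent
     * configuration is kept in vm->newDef and restored on shutdown */
    vm = virDomainAssignDef(driver->caps, &driver->domains, def, true);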
---
src/conf/domain_conf.c | 19 +++++++++++++------
src/conf/domain_conf.h | 5 ++++-
src/lxc/lxc_driver.c | 4 ++--
src/opennebula/one_driver.c | 4 ++--
src/openvz/openvz_driver.c | 4 ++--
src/qemu/qemu_driver.c | 10 +++++-----
src/test/test_driver.c | 10 +++++-----
src/uml/uml_driver.c | 4 ++--
8 files changed, 35 insertions(+), 25 deletions(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 22e1679..4cc27ff 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -749,18 +749,25 @@ static virDomainObjPtr virDomainObjNew(virCapsPtr caps)
virDomainObjPtr virDomainAssignDef(virCapsPtr caps,
virDomainObjListPtr doms,
- const virDomainDefPtr def)
+ const virDomainDefPtr def,
+ bool live)
{
virDomainObjPtr domain;
char uuidstr[VIR_UUID_STRING_BUFLEN];
if ((domain = virDomainFindByUUID(doms, def->uuid))) {
if (!virDomainObjIsActive(domain)) {
- virDomainDefFree(domain->def);
- domain->def = def;
+ if (live) {
+ /* save current configuration to be restored on domain shutdown */
+ if (!domain->newDef)
+ domain->newDef = domain->def;
+ domain->def = def;
+ } else {
+ virDomainDefFree(domain->def);
+ domain->def = def;
+ }
} else {
- if (domain->newDef)
- virDomainDefFree(domain->newDef);
+ virDomainDefFree(domain->newDef);
domain->newDef = def;
}
@@ -5780,7 +5787,7 @@ virDomainObjPtr virDomainLoadConfig(virCapsPtr caps,
newVM = 0;
}
- if (!(dom = virDomainAssignDef(caps, doms, def)))
+ if (!(dom = virDomainAssignDef(caps, doms, def, false)))
goto error;
dom->autostart = autostart;
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index 44fff0c..9e6cb69 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -810,9 +810,12 @@ void virDomainObjRef(virDomainObjPtr vm);
/* Returns 1 if the object was freed, 0 if more refs exist */
int virDomainObjUnref(virDomainObjPtr vm);
+/* live == true means def describes an active domain (being migrated or
+ * restored) as opposed to a new persistent configuration of the domain */
virDomainObjPtr virDomainAssignDef(virCapsPtr caps,
virDomainObjListPtr doms,
- const virDomainDefPtr def);
+ const virDomainDefPtr def,
+ bool live);
void virDomainRemoveInactive(virDomainObjListPtr doms,
virDomainObjPtr dom);
diff --git a/src/lxc/lxc_driver.c b/src/lxc/lxc_driver.c
index ba13065..9786cc0 100644
--- a/src/lxc/lxc_driver.c
+++ b/src/lxc/lxc_driver.c
@@ -381,7 +381,7 @@ static virDomainPtr lxcDomainDefine(virConnectPtr conn, const char *xml)
}
if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains, def)))
+ &driver->domains, def, false)))
goto cleanup;
def = NULL;
vm->persistent = 1;
@@ -1368,7 +1368,7 @@ lxcDomainCreateAndStart(virConnectPtr conn,
if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains, def)))
+ &driver->domains, def, false)))
goto cleanup;
def = NULL;
diff --git a/src/opennebula/one_driver.c b/src/opennebula/one_driver.c
index e1d1efc..f7fbd46 100644
--- a/src/opennebula/one_driver.c
+++ b/src/opennebula/one_driver.c
@@ -251,7 +251,7 @@ static virDomainPtr oneDomainDefine(virConnectPtr conn, const char *xml)
goto return_point;
if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains, def))) {
+ &driver->domains, def, false))) {
virDomainDefFree(def);
goto return_point;
}
@@ -456,7 +456,7 @@ oneDomainCreateAndStart(virConnectPtr conn,
}
if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains, def))) {
+ &driver->domains, def, false))) {
virDomainDefFree(def);
goto return_point;
}
diff --git a/src/openvz/openvz_driver.c b/src/openvz/openvz_driver.c
index 50aadfc..e0a0768 100644
--- a/src/openvz/openvz_driver.c
+++ b/src/openvz/openvz_driver.c
@@ -825,7 +825,7 @@ openvzDomainDefineXML(virConnectPtr conn, const char *xml)
goto cleanup;
}
if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains, vmdef)))
+ &driver->domains, vmdef, false)))
goto cleanup;
vmdef = NULL;
vm->persistent = 1;
@@ -905,7 +905,7 @@ openvzDomainCreateXML(virConnectPtr conn, const char *xml,
goto cleanup;
}
if (!(vm = virDomainAssignDef(driver->caps,
- &driver->domains, vmdef)))
+ &driver->domains, vmdef, false)))
goto cleanup;
vmdef = NULL;
/* All OpenVZ domains seem to be persistent - this is a bit of a violation
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 257f914..2c81d68 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3563,7 +3563,7 @@ static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char *xml,
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def)))
+ def, false)))
goto cleanup;
def = NULL;
@@ -5220,7 +5220,7 @@ static int qemudDomainRestore(virConnectPtr conn,
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def))) {
+ def, true))) {
qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to assign new VM"));
goto cleanup;
@@ -5761,7 +5761,7 @@ static virDomainPtr qemudDomainDefine(virConnectPtr conn, const char *xml) {
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def))) {
+ def, false))) {
goto cleanup;
}
def = NULL;
@@ -8391,7 +8391,7 @@ qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def))) {
+ def, true))) {
qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to assign new VM"));
goto cleanup;
@@ -8622,7 +8622,7 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def))) {
+ def, true))) {
qemuReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("failed to assign new VM"));
goto cleanup;
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
index f54ebae..9a880f1 100644
--- a/src/test/test_driver.c
+++ b/src/test/test_driver.c
@@ -554,7 +554,7 @@ static int testOpenDefault(virConnectPtr conn) {
if (testDomainGenerateIfnames(conn, domdef) < 0)
goto error;
if (!(domobj = virDomainAssignDef(privconn->caps,
- &privconn->domains, domdef)))
+ &privconn->domains, domdef, false)))
goto error;
domdef = NULL;
@@ -910,7 +910,7 @@ static int testOpenFromFile(virConnectPtr conn,
if (testDomainGenerateIfnames(conn, def) < 0 ||
!(dom = virDomainAssignDef(privconn->caps,
- &privconn->domains, def))) {
+ &privconn->domains, def, false))) {
virDomainDefFree(def);
goto error;
}
@@ -1308,7 +1308,7 @@ testDomainCreateXML(virConnectPtr conn, const char *xml,
if (testDomainGenerateIfnames(conn, def) < 0)
goto cleanup;
if (!(dom = virDomainAssignDef(privconn->caps,
- &privconn->domains, def)))
+ &privconn->domains, def, false)))
goto cleanup;
def = NULL;
@@ -1853,7 +1853,7 @@ static int testDomainRestore(virConnectPtr conn,
if (testDomainGenerateIfnames(conn, def) < 0)
goto cleanup;
if (!(dom = virDomainAssignDef(privconn->caps,
- &privconn->domains, def)))
+ &privconn->domains, def, true)))
goto cleanup;
def = NULL;
@@ -2302,7 +2302,7 @@ static virDomainPtr testDomainDefineXML(virConnectPtr conn,
if (testDomainGenerateIfnames(conn, def) < 0)
goto cleanup;
if (!(dom = virDomainAssignDef(privconn->caps,
- &privconn->domains, def)))
+ &privconn->domains, def, false)))
goto cleanup;
def = NULL;
dom->persistent = 1;
diff --git a/src/uml/uml_driver.c b/src/uml/uml_driver.c
index bf06787..0c12469 100644
--- a/src/uml/uml_driver.c
+++ b/src/uml/uml_driver.c
@@ -1283,7 +1283,7 @@ static virDomainPtr umlDomainCreate(virConnectPtr conn, const char *xml,
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def)))
+ def, false)))
goto cleanup;
def = NULL;
@@ -1619,7 +1619,7 @@ static virDomainPtr umlDomainDefine(virConnectPtr conn, const char *xml) {
if (!(vm = virDomainAssignDef(driver->caps,
&driver->domains,
- def)))
+ def, false)))
goto cleanup;
def = NULL;
vm->persistent = 1;
--
1.7.0.3
[libvirt] [PATCH 0/1] Disk error policy
by David Allan
Here's a revised patch for disk error policy XML incorporating the feedback from Dan and Daniel.
Dave
David Allan (1):
Add disk error policy to domain XML
docs/schemas/domain.rng | 12 +++++++-
src/conf/domain_conf.c | 18 +++++++++++
src/conf/domain_conf.h | 10 ++++++
src/libvirt_private.syms | 2 +-
src/qemu/qemu_conf.c | 17 +++++++++-
tests/qemuargv2xmltest.c | 3 ++
.../qemuxml2argv-disk-drive-error-policy-stop.args | 1 +
.../qemuxml2argv-disk-drive-error-policy-stop.xml | 32 ++++++++++++++++++++
tests/qemuxml2argvtest.c | 3 ++
9 files changed, 94 insertions(+), 4 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-disk-drive-error-policy-stop.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-disk-drive-error-policy-stop.xml
[libvirt] [PATCH] [RFC] Use enum of virDomainNetType
by Stefan Berger
To find out where the net type 'direct' needs to be handled, I introduced
the 'enum virDomainNetType' type in the virDomainNetDef structure and let the
compiler tell me where a case statement is missing. Then I added the missing
handling for the unsupported device type to the UML driver.
Signed-off-by: Stefan Berger <stefanb(a)us.ibm.com>
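For readers unfamiliar with the trick, here is a minimal standalone sketch
(not libvirt code) of why this works: once the field is typed as an enum,
gcc's -Wswitch warning (enabled by -Wall) flags any switch over it that lacks
a case for one of the enumerators and has no default label.

    enum color { COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_LAST };

    struct item {
        enum color c;          /* was a plain 'int': no warning possible */
    };

    int describe(const struct item *i)
    {
        switch (i->c) {        /* gcc -Wall: "enumeration value 'COLOR_BLUE'
                                * not handled in switch" */
        case COLOR_RED:
            return 1;
        case COLOR_GREEN:
            return 2;
        case COLOR_LAST:
            break;
        }
        return 0;
    }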
Index: libvirt-plain/src/conf/domain_conf.h
===================================================================
--- libvirt-plain.orig/src/conf/domain_conf.h
+++ libvirt-plain/src/conf/domain_conf.h
@@ -251,7 +251,7 @@ enum virDomainNetdevMacvtapType {
typedef struct _virDomainNetDef virDomainNetDef;
typedef virDomainNetDef *virDomainNetDefPtr;
struct _virDomainNetDef {
- int type;
+ enum virDomainNetType type;
unsigned char mac[VIR_MAC_BUFLEN];
char *model;
union {
Index: libvirt-plain/src/lxc/lxc_driver.c
===================================================================
--- libvirt-plain.orig/src/lxc/lxc_driver.c
+++ libvirt-plain/src/lxc/lxc_driver.c
@@ -800,6 +800,16 @@ static int lxcSetupInterfaces(virConnect
case VIR_DOMAIN_NET_TYPE_BRIDGE:
bridge = def->nets[i]->data.bridge.brname;
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_ETHERNET:
+ case VIR_DOMAIN_NET_TYPE_SERVER:
+ case VIR_DOMAIN_NET_TYPE_CLIENT:
+ case VIR_DOMAIN_NET_TYPE_MCAST:
+ case VIR_DOMAIN_NET_TYPE_INTERNAL:
+ case VIR_DOMAIN_NET_TYPE_DIRECT:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
DEBUG("bridge: %s", bridge);
Index: libvirt-plain/src/qemu/qemu_conf.c
===================================================================
--- libvirt-plain.orig/src/qemu/qemu_conf.c
+++ libvirt-plain/src/qemu/qemu_conf.c
@@ -2686,6 +2686,14 @@ qemuBuildHostNetStr(virDomainNetDefPtr n
net->data.socket.address,
net->data.socket.port);
break;
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_ETHERNET:
+ case VIR_DOMAIN_NET_TYPE_NETWORK:
+ case VIR_DOMAIN_NET_TYPE_BRIDGE:
+ case VIR_DOMAIN_NET_TYPE_INTERNAL:
+ case VIR_DOMAIN_NET_TYPE_DIRECT:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
type_sep = ',';
break;
Index: libvirt-plain/src/uml/uml_conf.c
===================================================================
--- libvirt-plain.orig/src/uml/uml_conf.c
+++ libvirt-plain/src/uml/uml_conf.c
@@ -244,6 +244,14 @@ umlBuildCommandLineNet(virConnectPtr con
umlReportError(conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR, "%s",
_("internal networking type not supported"));
goto error;
+
+ case VIR_DOMAIN_NET_TYPE_DIRECT:
+ umlReportError(conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR, "%s",
+ _("direct networking type not supported"));
+ goto error;
+
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
virBufferVSprintf(&buf, ",%02x:%02x:%02x:%02x:%02x:%02x",
Index: libvirt-plain/src/conf/domain_conf.c
===================================================================
--- libvirt-plain.orig/src/conf/domain_conf.c
+++ libvirt-plain/src/conf/domain_conf.c
@@ -450,6 +450,10 @@ void virDomainNetDefFree(virDomainNetDef
case VIR_DOMAIN_NET_TYPE_DIRECT:
VIR_FREE(def->data.direct.linkdev);
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
VIR_FREE(def->ifname);
@@ -1740,7 +1744,7 @@ virDomainNetDefParseXML(virCapsPtr caps,
type = virXMLPropString(node, "type");
if (type != NULL) {
- if ((def->type = virDomainNetTypeFromString(type)) < 0) {
+ if ((int)(def->type = virDomainNetTypeFromString(type)) < 0) {
virDomainReportError(VIR_ERR_INTERNAL_ERROR,
_("unknown interface type '%s'"), type);
goto error;
@@ -1949,6 +1953,10 @@ virDomainNetDefParseXML(virCapsPtr caps,
dev = NULL;
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
if (ifname != NULL) {
@@ -4861,6 +4869,10 @@ virDomainNetDefFormat(virBufferPtr buf,
virDomainNetdevMacvtapTypeToString(def->data.direct.mode));
virBufferAddLit(buf, "/>\n");
break;
+
+ case VIR_DOMAIN_NET_TYPE_USER:
+ case VIR_DOMAIN_NET_TYPE_LAST:
+ break;
}
if (def->ifname)
Re: [libvirt] [Qemu-devel] Re: Supporting hypervisor specific APIs in libvirt
by Anthony Liguori
On 03/23/2010 10:57 AM, Paul Brook wrote:
>>> I think there is a serious divergence of approach there; instantiating
>>> APIs while stating 'we are gonna deprecate them sooner or later' tells the
>>> application developer 'my time is more important than yours', and that is
>>> not really something I like to carry to the API users.
>>> The main goal of libvirt remains to provide APIs needed to unify the
>>> development of the virtualization layers. Having APIs which make
>>> sense only for one or two virtualization engines is not a problem in
>>> itself; it just raises questions about the actual semantics of that API.
>>> If those semantics are sound, then I see no reason not to add it, really,
>>> and we actually often do.
>>>
>> Yeah, but the problem we're facing is, I want there to be an API added
>> to the management layer as part of the feature commit in qemu. If there
>> has to be a discussion and decisions about how to model the API, it's
>> not going to be successful.
>>
> I thought the monitor protocol *was* our API. If not, why not?
>
It is. But our API is missing key components like guest enumeration.
So the fundamental topic here is: do we introduce these missing
components to allow people to build directly against our interface, or do we
make use of the functionality that libvirt already provides, if they can
plumb our API directly through to users.
Regards,
Anthony Liguori
> Paul
>
[libvirt] [PATCH] Mention direct device support since 0.7.7 in web doc
by Stefan Berger
In the web documentation, mention that direct device support has been there
since libvirt 0.7.7. Linux kernel 2.6.34 is required for macvtap to be
available as a standard device.
Index: libvirt-plain/docs/formatdomain.html.in
===================================================================
--- libvirt-plain.orig/docs/formatdomain.html.in
+++ libvirt-plain/docs/formatdomain.html.in
@@ -741,8 +741,11 @@
<p>
Provides direct attachment of the virtual machine's NIC to the given
- physial interface of the host. This setup requires the Linux macvtap
- driver to be available. One of the modes 'vepa'
+ physial interface of the host.
+ <span class="since">Since 0.7.7 (QEMU and KVM only)</span><br>
+ This setup requires the Linux macvtap
+ driver to be available. <span class="since">(Since Linux 2.6.34.)</span>
+ One of the modes 'vepa'
( <a href="http://www.ieee802.org/1/files/public/docs2009/new-evb-congdon-vepa-modul...">
'Virtual Ethernet Port Aggregator'</a>), 'bridge' or 'private'
can be chosen for the operation mode of the macvtap device, 'vepa'
[libvirt] [PATCH] Fix QEMU cpu affinity at startup to include all threads
by Daniel P. Berrange
The QEMU CPU affinity is used in NUMA scenarios to ensure that
guest memory is allocated from a specific node. Normally memory
is allocated on demand in vCPU threads, but when using hugepages
the initial thread leader allocates memory upfront. libvirt was
not setting the affinity of the thread leader or of the I/O threads.
This patch changes the code to set the process affinity between
the fork() and exec() of QEMU, which ensures that every single
QEMU thread gets the affinity.
* src/qemu/qemu_driver.c: Set affinity on entire QEMU process
at startup
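The underlying technique, in isolation (a standalone sketch of the general
idea, not the libvirt helper used in this patch): a CPU affinity mask set in
the child between fork() and exec() survives the exec and is inherited by
every thread the new program later creates, including the thread leader that
does the upfront hugepage allocation.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <unistd.h>

    static void run_pinned(char **argv)
    {
        pid_t pid = fork();
        if (pid == 0) {
            cpu_set_t mask;
            CPU_ZERO(&mask);
            CPU_SET(0, &mask);   /* pin to CPU 0, purely as an example */
            /* applies to the only thread in the child; the mask is kept
             * across execv() and inherited by all threads QEMU creates */
            sched_setaffinity(0, sizeof(mask), &mask);
            execv(argv[0], argv);
            _exit(127);
        }
    }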
---
src/qemu/qemu_driver.c | 29 ++++++++++++++++-------------
1 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 257f914..2598deb 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1701,6 +1701,9 @@ qemuDetectVcpuPIDs(struct qemud_driver *driver,
return 0;
}
+/*
+ * To be run between fork/exec of QEMU only
+ */
static int
qemudInitCpuAffinity(virDomainObjPtr vm)
{
@@ -1708,7 +1711,8 @@ qemudInitCpuAffinity(virDomainObjPtr vm)
virNodeInfo nodeinfo;
unsigned char *cpumap;
int cpumaplen;
- qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ DEBUG0("Setting CPU affinity");
if (nodeGetInfo(NULL, &nodeinfo) < 0)
return -1;
@@ -1740,14 +1744,14 @@ qemudInitCpuAffinity(virDomainObjPtr vm)
VIR_USE_CPU(cpumap, i);
}
- /* The XML config only gives a per-VM affinity, so we apply
- * the same mapping to all vCPUs */
- for (i = 0 ; i < priv->nvcpupids ; i++) {
- if (virProcessInfoSetAffinity(priv->vcpupids[i],
- cpumap, cpumaplen, maxcpu) < 0) {
- VIR_FREE(cpumap);
- return -1;
- }
+ /* We are assuming we are running between fork/exec of QEMU, so
+ * that getpid() gives the QEMU process ID and we know that
+ * no threads are running.
+ */
+ if (virProcessInfoSetAffinity(getpid(),
+ cpumap, cpumaplen, maxcpu) < 0) {
+ VIR_FREE(cpumap);
+ return -1;
}
VIR_FREE(cpumap);
@@ -2653,6 +2657,9 @@ struct qemudHookData {
static int qemudSecurityHook(void *data) {
struct qemudHookData *h = data;
+ if (qemudInitCpuAffinity(h->vm) < 0)
+ return -1;
+
if (qemuAddToCgroup(h->driver, h->vm->def) < 0)
return -1;
@@ -2943,10 +2950,6 @@ static int qemudStartVMDaemon(virConnectPtr conn,
if (qemuDetectVcpuPIDs(driver, vm) < 0)
goto abort;
- DEBUG0("Setting CPU affinity");
- if (qemudInitCpuAffinity(vm) < 0)
- goto abort;
-
DEBUG0("Setting any required VM passwords");
if (qemuInitPasswords(conn, driver, vm, qemuCmdFlags) < 0)
goto abort;
--
1.6.2.5
[libvirt] [PATCH][Network] Make dhcp service enabled only if //ip/dhcp exists in network xml
by Satoru SATOH
libvirtd enables the DHCP service on virtual networks even if the
'//ip/dhcp' element does not exist in the network XML. The following
patch fixes this problem.
Signed-off-by: Satoru SATOH <satoru.satoh(a)gmail.com>
---
src/conf/network_conf.c | 2 ++
src/conf/network_conf.h | 2 ++
src/network/bridge_driver.c | 5 +----
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/conf/network_conf.c b/src/conf/network_conf.c
index 1f3a44c..e41775a 100644
--- a/src/conf/network_conf.c
+++ b/src/conf/network_conf.c
@@ -466,6 +466,8 @@ virNetworkDefParseXML(xmlXPathContextPtr ctxt)
if ((ip = virXPathNode("./ip[1]", ctxt)) &&
virNetworkIPParseXML(def, ip) < 0)
goto error;
+
+ def->dhcp = (virXPathNode("./ip/dhcp", ctxt) != NULL ? 1 : 0);
}
diff --git a/src/conf/network_conf.h b/src/conf/network_conf.h
index 127a23a..847ddd3 100644
--- a/src/conf/network_conf.h
+++ b/src/conf/network_conf.h
@@ -74,6 +74,8 @@ struct _virNetworkDef {
char *netmask;
char *network;
+ unsigned int dhcp;
+
unsigned int nranges; /* Zero or more dhcp ranges */
virNetworkDHCPRangeDefPtr ranges;
diff --git a/src/network/bridge_driver.c b/src/network/bridge_driver.c
index 83ab00e..6dcf7b4 100644
--- a/src/network/bridge_driver.c
+++ b/src/network/bridge_driver.c
@@ -943,12 +943,9 @@ static int networkStartNetworkDaemon(struct network_driver *driver,
goto err_delbr2;
}
- if ((network->def->ipAddress ||
- network->def->nranges) &&
- dhcpStartDhcpDaemon(network) < 0)
+ if (network->def->dhcp && dhcpStartDhcpDaemon(network) < 0)
goto err_delbr2;
-
/* Persist the live configuration now we have bridge info */
if (virNetworkSaveConfig(NETWORK_STATE_DIR, network->def) < 0) {
goto err_kill;
--
1.6.2.5
[libvirt] [PATCH 1/1] XenAPI remote storage support on libvirt
by Sharadha Prabhakar (3P)
This patch contains the APIs for XenAPI remote storage support in libvirt.
It allows you to list storage pools and storage volumes, get information
about storage pools and volumes, and create storage pools of type NETFS
with format type nfs, cifs-iso or nfs-iso using virsh. You can also create
VMs with storage pools attached and destroy storage pools.
When creating a VM with storage, the disk tag's source element should be
of the form '/storage pool uuid/storage volume uuid'.
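For illustration, the driver splits such a source path with a pattern
equivalent to this standalone sketch (see xenapiStorageVolLookupByPath()
further down in the patch; buffer sizing is the caller's responsibility
here):

    #include <stdio.h>

    /* "/<pool uuid>/<volume uuid>" -> pool uuid and volume uuid */
    static int split_source_path(const char *path, char *sruuid, char *vuuid)
    {
        sruuid[0] = vuuid[0] = '\0';
        sscanf(path, "/%[^/]/%[^/]", sruuid, vuuid);
        return (sruuid[0] != '\0' && vuuid[0] != '\0') ? 0 : -1;
    }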
--- ./libvirt_org/src/xenapi/xenapi_storage_driver.c 1970-01-01 01:00:00.000000000 +0100
+++ ./libvirt/src/xenapi/xenapi_storage_driver.c 2010-03-24 15:27:43.000000000 +0000
@@ -0,0 +1,1499 @@
+/*
+ * xenapi_storage_driver.c: Xen API storage driver APIs
+ * Copyright (C) 2009, 2010 Citrix Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Sharadha Prabhakar <sharadha.prabhakar(a)citrix.com>
+ */
+
+#include <config.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <libxml/uri.h>
+#include <xen_internal.h>
+#include <libxml/parser.h>
+#include <curl/curl.h>
+#include <xen/api/xen_common.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_vm.h>
+#include <xen/api/xen_all.h>
+#include <xen/api/xen_vm_metrics.h>
+#include <xen/api/xen_api_failure.h>
+#include <xen/dom0_ops.h>
+
+#include "libvirt_internal.h"
+#include "libvirt/libvirt.h"
+#include "virterror_internal.h"
+#include "storage_conf.h"
+#include "datatypes.h"
+#include "xenapi_driver.h"
+#include "util.h"
+#include "uuid.h"
+#include "authhelper.h"
+#include "memory.h"
+#include "driver.h"
+#include "util/logging.h"
+#include "buf.h"
+#include "xenapi_utils.h"
+#include "xenapi_storage_driver.h"
+
+/*
+*XenapiStorageOpen
+*
+*Authenticates and creates a session with the server
+*Returns VIR_DRV_OPEN_SUCCESS on success, else VIR_DRV_OPEN_ERROR
+*/
+static virDrvOpenStatus
+xenapiStorageOpen (virConnectPtr conn, virConnectAuthPtr auth, int flags ATTRIBUTE_UNUSED)
+{
+ char *username = NULL;
+ char *password = NULL;
+ struct _xenapiStoragePrivate *privP = NULL;
+
+ if (conn->uri == NULL || conn->uri->scheme == NULL ||
+ STRCASENEQ(conn->uri->scheme, "XenAPI")) {
+ return VIR_DRV_OPEN_DECLINED;
+ }
+
+ if (conn->uri->server == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Server name not in URI");
+ goto error;
+ }
+
+ if (auth == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Authentication Credentials not found");
+ goto error;
+ }
+
+ if (conn->uri->user != NULL) {
+ username = strdup(conn->uri->user);
+
+ if (username == NULL) {
+ virReportOOMError();
+ goto error;
+ }
+ } else {
+ username = virRequestUsername(auth, NULL, conn->uri->server);
+
+ if (username == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Username request failed");
+ goto error;
+ }
+ }
+
+ password = virRequestPassword(auth, username, conn->uri->server);
+
+ if (password == NULL) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED,
+ "Password request failed");
+ goto error;
+ }
+
+ if (VIR_ALLOC(privP) < 0) {
+ virReportOOMError();
+ goto error;
+ }
+
+ if (virAsprintf(&privP->url, "https://%s", conn->uri->server) < 0) {
+ virReportOOMError();
+ goto error;
+ }
+
+ if (xenapiUtil_ParseQuery(conn, conn->uri, &privP->noVerify) < 0)
+ goto error;
+
+ xmlInitParser();
+ xmlKeepBlanksDefault(0);
+ xen_init();
+ curl_global_init(CURL_GLOBAL_ALL);
+
+ privP->session = xen_session_login_with_password(call_func, privP, username,
+ password, xen_api_latest_version);
+
+ if (privP->session != NULL && privP->session->ok) {
+ conn->storagePrivateData = privP;
+ VIR_FREE(username);
+ VIR_FREE(password);
+ return VIR_DRV_OPEN_SUCCESS;
+ }
+
+ xenapiSessionErrorHandler(conn, VIR_ERR_AUTH_FAILED, "");
+
+ error:
+ VIR_FREE(username);
+ VIR_FREE(password);
+
+ if (privP != NULL) {
+ if (privP->session != NULL)
+ xenSessionFree(privP->session);
+
+ VIR_FREE(privP->url);
+ VIR_FREE(privP);
+ }
+
+ return VIR_DRV_OPEN_ERROR;
+}
+
+
+/*
+*XenapiStorageClose
+*
+*Closes the session with the server
+*Returns 0 on success
+*/
+static int
+xenapiStorageClose (virConnectPtr conn)
+{
+ struct _xenapiStoragePrivate *priv = (struct _xenapiStoragePrivate *)conn->storagePrivateData;
+ xen_session_logout(priv->session);
+ VIR_FREE(priv->url);
+ VIR_FREE(priv);
+ return 0;
+
+}
+
+/*
+*XenapiNumOfStoragePools
+*
+*Provides the number of active storage pools
+*Returns number of pools found on success, or -1 on error
+*/
+static int
+xenapiNumOfStoragePools (virConnectPtr conn)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ bool currently_attached;
+ int cnt=-1,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; i<sr_set->size; i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 1) cnt++;
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return cnt;
+}
+
+/*
+*XenapiListStoragePools
+*
+*Provides the list of names of active storage pools upto maxnames
+*returns number of names in the list on success ,or -1 or error
+*/
+static int
+xenapiListStoragePools (virConnectPtr conn, char **const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ char *usenames=NULL;
+ bool currently_attached;
+ int count=0,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; (i<sr_set->size) && (count<maxnames); i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 1) {
+ if(!(usenames = strdup(record->name_label))) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ names[count++] = usenames;
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ goto cleanup;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+ cleanup:
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ while (--count>=0) VIR_FREE(names[count]);
+ return -1;
+}
+
+
+/*
+*XenapiListDefinedStoragePools
+*
+*Provides the list of names of inactive storage pools upto maxnames
+*
+*/
+static int
+xenapiListDefinedStoragePools (virConnectPtr conn, char **const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ char *usenames=NULL;
+ bool currently_attached;
+ int count=0,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; (i<sr_set->size) && (count<maxnames); i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 0) {
+ if(!(usenames = strdup(record->name_label))) {
+ virReportOOMError();
+ goto cleanup;
+ }
+ names[count++] = usenames;
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ goto cleanup;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+
+ cleanup:
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ while (--count>=0) free(names[count]);
+ return -1;
+}
+
+
+/*
+*XenapiNumOfDefinedStoragePools
+*
+*Provides the number of inactive storage pools
+*
+*/
+static int
+xenapiNumOfDefinedStoragePools (virConnectPtr conn)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_sr_record *record=NULL;
+ int cnt=-1,i;
+ xen_session * session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_all(session, &sr_set) && sr_set->size>0) {
+ for (i=0; i<sr_set->size; i++) {
+ if (xen_sr_get_record(session, &record, sr_set->contents[i])) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr_set->contents[i]) && pbd_set->size>0) {
+ bool currently_attached;
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ if (currently_attached == 0) cnt++;
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Devices not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, "");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return cnt;
+}
+
+/*
+*XenapiStoragePoolCreateXML
+*
+*Creates a Storage Pool from the given XML
+* Only storage pool type NETFS is supported for now
+*/
+static virStoragePoolPtr
+xenapiStoragePoolCreateXML (virConnectPtr conn, const char *xmlDesc,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ virStoragePoolDefPtr pdef = NULL;
+ char *pooltype=NULL;
+ xen_sr sr=NULL;
+ xen_host host=NULL;
+ virBuffer path = VIR_BUFFER_INITIALIZER;
+ xen_string_string_map *device_config=NULL,*smconfig=NULL;
+ virStoragePoolPtr poolPtr = NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_sr_record *sr_record = NULL;
+ xen_session *session = ((struct _xenapiPrivate *)(conn->privateData))->session;
+ if(!(pdef = virStoragePoolDefParseString(xmlDesc))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't parse XML");
+ virBufferFreeAndReset(&path);
+ return NULL;
+ }
+ if (pdef->type != VIR_STORAGE_POOL_NETFS) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Only Pool type NETFS is currently supported");
+ goto cleanup;
+ } else {
+ if (pdef->source.format == VIR_STORAGE_POOL_NETFS_NFS_ISO) {
+ pooltype = (char *)"iso";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Host name required for creating NFS ISO SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating NFS ISO SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(1);
+ if (!(device_config->contents[0].key = strdup("location"))) {
+ goto cleanup_device_config;
+ }
+ virBufferVSprintf(&path,"%s:%s",pdef->source.host.name, pdef->source.dir);
+ device_config->contents[0].val = virBufferContentAndReset(&path);
+ smconfig = xen_string_string_map_alloc(0);
+ }
+ else if (pdef->source.format == VIR_STORAGE_POOL_NETFS_CIFS_ISO) {
+ pooltype = (char *)"iso";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Host name required for creating CIFS ISO SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating CIFS ISO SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(1);
+ if (!(device_config->contents[0].key = strdup("location")))
+ goto cleanup_device_config;
+
+ if (pdef->source.host.name[0] != '/') {
+ virBufferVSprintf(&path,"//%s%s",pdef->source.host.name, pdef->source.dir);
+ }
+ else {
+ virBufferVSprintf(&path,"%s%s",pdef->source.host.name, pdef->source.dir);
+ }
+ device_config->contents[0].val = virBufferContentAndReset(&path);
+ smconfig = xen_string_string_map_alloc(1);
+ if (!(smconfig->contents[0].key = strdup("iso_type"))) {
+ xen_string_string_map_free(smconfig);
+ xen_string_string_map_free(device_config);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ if (!(smconfig->contents[0].val = strdup("cifs"))) {
+ xen_string_string_map_free(smconfig);
+ xen_string_string_map_free(device_config);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ }
+ else if (pdef->source.format == VIR_STORAGE_POOL_NETFS_NFS) {
+ pooltype = (char *)"nfs";
+ if (!pdef->source.host.name) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Server name required for creating NFS SR");
+ goto cleanup;
+ }
+ if (!pdef->source.dir) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Directory required for creating NFS SR");
+ goto cleanup;
+ }
+ device_config = xen_string_string_map_alloc(2);
+ if (!(device_config->contents[0].key = strdup("server")))
+ goto cleanup_device_config;
+ if (!(device_config->contents[0].val = strdup(pdef->source.host.name)))
+ goto cleanup_device_config;
+ if (!(device_config->contents[1].key = strdup("serverpath")))
+ goto cleanup_device_config;
+ if (!(device_config->contents[1].val = strdup(pdef->source.dir)))
+ goto cleanup_device_config;
+ smconfig = xen_string_string_map_alloc(0);
+ virBufferFreeAndReset(&path);
+ }
+ else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Format type of NETFS not supported by the hypervisor");
+ goto cleanup;
+ }
+ }
+ if (!xen_session_get_this_host(session, &host, session)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ if (!xen_sr_create(session, &sr, host, device_config, 0, pdef->name, (char *)"",
+ pooltype, (char *) "iso", true, smconfig)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ xen_host_free(host);
+ return NULL;
+ }
+ if (!xen_sr_get_record(session, &sr_record, sr)){
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ virStoragePoolDefFree(pdef);
+ xen_host_free(host);
+ xen_sr_free(sr);
+ return NULL;
+ }
+ virUUIDParse(sr_record->uuid,raw_uuid);
+ poolPtr = virGetStoragePool(conn,(const char *)sr_record->name_label,raw_uuid);
+ if (!poolPtr) xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get a valid storage pool pointer");
+ virStoragePoolDefFree(pdef);
+ xen_sr_record_free(sr_record);
+ xen_host_free(host);
+ return poolPtr;
+
+ cleanup_device_config:
+ xen_string_string_map_free(device_config);
+
+ cleanup:
+ virStoragePoolDefFree(pdef);
+ virBufferFreeAndReset(&path);
+ return NULL;
+}
+
+static int
+xenapiStoragePoolBuild (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ return 0; /* return SUCCESS for now */
+}
+
+
+static int
+xenapiStoragePoolCreate (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ return 0;
+}
+
+
+/*
+*XenapiStoragePoolSetAutostart
+*
+*Autostart option is always ON by default and is not allowed to be OFF
+*
+*/
+static int
+xenapiStoragePoolSetAutostart (virStoragePoolPtr pool, int autostart)
+{
+ virConnectPtr conn = pool->conn;
+ if (autostart == 1) {
+ VIR_DEBUG0("XenAPI storage pools autostart option is always ON by default");
+ return 0;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Hypervisor doesn't allow autostart to be OFF");
+ return -1;
+ }
+}
+
+
+/*
+*XenapiStoragePoolGetAutostart
+*
+*Returns the storage pool autostart option. Which is always ON
+*
+*/
+static int
+xenapiStoragePoolGetAutostart (virStoragePoolPtr pool ATTRIBUTE_UNUSED,
+ int * autostart)
+{
+ *autostart=1; /* XenAPI storage pools always have autostart set to ON */
+ return 0;
+}
+
+
+/*
+*XenapiStoragePoolLookupByName
+*
+* storage pool based on its unique name
+*
+*/
+static virStoragePoolPtr
+xenapiStoragePoolLookupByName (virConnectPtr conn,
+ const char * name)
+{
+ virStoragePoolPtr poolPtr=NULL;
+ xen_sr_record *record=NULL;
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_by_name_label(session, &sr_set, (char *)name) && sr_set->size>0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ sr = sr_set->contents[0];
+ if (!xen_sr_get_record(session, &record, sr)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ virUUIDParse(record->uuid,raw_uuid);
+ if (!(poolPtr = virGetStoragePool(conn,name,raw_uuid)))
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool pointer not available");
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return poolPtr;
+}
+
+
+/*
+*XenapiStoragePoolGetXMLDesc
+*
+*Returns the configuration of a storage pool as XML
+*
+*/
+static char *
+xenapiStoragePoolGetXMLDesc (virStoragePoolPtr pool,
+ unsigned int flags ATTRIBUTE_UNUSED)
+{
+ xen_sr_record *record=NULL;
+ xen_sr sr=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ xen_pbd pbd=NULL;
+ char *pathDetails = NULL, *host=NULL, *path=NULL,*xml=NULL;
+ virConnectPtr conn = pool->conn;
+ virStoragePoolDefPtr pdef=NULL;
+ xen_string_string_map *smconfig=NULL;
+ bool cifs;
+ xen_string_string_map *deviceConfig=NULL;
+ char uuidStr[VIR_UUID_STRING_BUFLEN];
+ int i;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuidStr);
+ if (xen_sr_get_by_uuid(session, &sr, uuidStr)) {
+ if (!xen_sr_get_record(session, &record, sr)) {
+ xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get SR information");
+ return NULL;
+ }
+ if (VIR_ALLOC(pdef)<0) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ return NULL;
+ }
+ if (STREQ(record->type,"nfs") || STREQ(record->type,"iso"))
+ pdef->type = VIR_STORAGE_POOL_NETFS;
+ else if(STREQ(record->type,"iscsi"))
+ pdef->type = VIR_STORAGE_POOL_ISCSI;
+ else if(STREQ(record->type,"file"))
+ pdef->type = VIR_STORAGE_POOL_DIR;
+ else if(STREQ(record->type,"lvm"))
+ pdef->type = VIR_STORAGE_POOL_LOGICAL;
+ else if(STREQ(record->type,"ext")) {
+ pdef->type = VIR_STORAGE_POOL_FS;
+ pdef->source.format = VIR_STORAGE_POOL_FS_EXT3;
+ }
+ else if(STREQ(record->type,"hba"))
+ pdef->type = VIR_STORAGE_POOL_SCSI;
+
+ if (!(pdef->name = strdup(record->name_label))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ virUUIDParse(record->uuid,pdef->uuid);
+ pdef->allocation = (record->virtual_allocation)/1024;
+ pdef->capacity = (record->physical_size)/1024;
+ pdef->available = (record->physical_size - record->physical_utilisation)/1024;
+
+ if (STREQ(record->type,"iso")) {
+ if (xen_sr_get_sm_config(session, &smconfig, sr)){
+ cifs = false;
+ for (i=0;i<smconfig->size;i++){
+ if (STREQ(smconfig->contents[i].key,"iso_type")
+ && STREQ(smconfig->contents[i].val, "cifs"))
+ cifs = true;
+ break;
+ }
+ xen_string_string_map_free(smconfig);
+ xen_sr_get_pbds (session, &pbd_set, sr);
+ pbd = pbd_set->contents[0];
+ xen_pbd_get_device_config(session, &deviceConfig, pbd);
+ if (deviceConfig) {
+ for (i=0;i<deviceConfig->size;i++) {
+ if(STREQ(deviceConfig->contents[i].key,"location")) {
+ if (!(pathDetails = strdup(deviceConfig->contents[i].val))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ xen_string_string_map_free(deviceConfig);
+ xen_pbd_set_free(pbd_set);
+ return NULL;
+ }
+ break;
+ }
+ }
+ xen_string_string_map_free(deviceConfig);
+ xen_pbd_set_free(pbd_set);
+ }
+ if (pathDetails) {
+ if (VIR_ALLOC_N(host,strlen(pathDetails)) <0) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ if (VIR_ALLOC_N(path,strlen(pathDetails)) <0) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(host);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ host[0]='\0';path[0]='\0';
+ if (cifs) {
+ pdef->source.format = VIR_STORAGE_POOL_NETFS_CIFS_ISO;
+ sscanf(pathDetails,"//%[^/]%s",host,path);
+ } else {
+ pdef->source.format = VIR_STORAGE_POOL_NETFS_NFS_ISO;
+ sscanf(pathDetails,"%[^:]:%s",host,path);
+ }
+ if (STRNEQ(host,"\0")) {
+ if (!(pdef->source.host.name = strdup(host))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(host);
+ VIR_FREE(path);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ }
+ if (STRNEQ(path,"\0")) {
+ if (!(pdef->source.dir = strdup(path))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ VIR_FREE(host);
+ VIR_FREE(path);
+ VIR_FREE(pathDetails);
+ return NULL;
+ }
+ }
+ VIR_FREE(host);
+ VIR_FREE(path);
+ VIR_FREE(pathDetails);
+ }
+ }
+ }
+ if (!(pdef->target.path = strdup("/"))) {
+ virReportOOMError();
+ xen_sr_record_free(record);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ xen_sr_record_free(record);
+ xml = virStoragePoolDefFormat(pdef);
+ virStoragePoolDefFree(pdef);
+ if (!xml)
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't convert to XML format");
+ return xml;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return NULL;
+ }
+}
+
+
+/*
+*XenapiStoragePoolNumOfVolumes
+*
+*Fetch the number of storage volumes within a pool
+*
+*/
+static int
+xenapiStoragePoolNumOfVolumes (virStoragePoolPtr pool ATTRIBUTE_UNUSED)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ xen_vdi_set *vdi_set=NULL;
+ virConnectPtr conn = pool->conn;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ int count=0;
+ if (xen_sr_get_by_name_label(session, &sr_set, pool->name) && sr_set->size!=0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool Name is not unique");
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ sr = sr_set->contents[0];
+ if (xen_sr_get_vdis(session, &vdi_set, sr) && vdi_set->size!=0) {
+ count = vdi_set->size;
+ xen_sr_set_free(sr_set);
+ xen_vdi_set_free(vdi_set);
+ return count;
+ } else {
+ if (vdi_set) {
+ xen_vdi_set_free(vdi_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Volume not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return -1;
+ }
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ return -1;
+ }
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+}
+
+
+/*
+*XenapiStoragePoolListVolumes
+*
+*Fetch list of storage volume names, limiting to at most maxnames.
+*
+*/
+static int
+xenapiStoragePoolListVolumes (virStoragePoolPtr pool, char ** const names,
+ int maxnames)
+{
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ xen_vdi_set *vdi_set=NULL;
+ xen_vdi vdi=NULL;
+ int count,i;
+ char *usenames = NULL;
+ virConnectPtr conn=pool->conn;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_sr_get_by_name_label(session, &sr_set, pool->name) && sr_set->size>0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return -1;
+ }
+ sr = sr_set->contents[0];
+ if (xen_sr_get_vdis(session, &vdi_set, sr) && vdi_set->size>0) {
+ for (i=0,count=0; (i<vdi_set->size) && (count<maxnames); i++) {
+ vdi = vdi_set->contents[i];
+ if (xen_vdi_get_name_label(session, &usenames, vdi)) {
+ names[count++] = usenames;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_vdi_set_free(vdi_set);
+ xen_sr_set_free(sr_set);
+ while(--count) VIR_FREE(names[count]);
+ return -1;
+ }
+ }
+ xen_vdi_set_free(vdi_set);
+ } else {
+ if (vdi_set) {
+ xen_vdi_set_free(vdi_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Volume not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return -1;
+ }
+ xen_sr_set_free(sr_set);
+ return count;
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return -1;
+}
+
+/*
+*XenapiStoragePoolIsActive
+*
+*Determine if the storage pool is currently running
+*
+*/
+static int
+xenapiStoragePoolIsActive(virStoragePoolPtr pool)
+{
+ xen_sr sr=NULL;
+ xen_pbd_set *pbd_set=NULL;
+ virConnectPtr conn=pool->conn;
+ char uuid[VIR_UUID_STRING_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuid);
+
+ if (xen_sr_get_by_uuid(session, &sr, uuid)) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size>0) {
+ bool currently_attached;
+ xen_pbd_get_currently_attached(session, &currently_attached, pbd_set->contents[0]);
+ xen_pbd_set_free(pbd_set);
+ xen_sr_free(sr);
+ if (currently_attached == 1)
+ return 1; /* running */
+ else
+ return 0; /* not running */
+ } else {
+ if (pbd_set) {
+ xen_pbd_set_free(pbd_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Physical Block Device not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_sr_free(sr);
+ return -1;
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return -1;
+}
+
+/*
+*XenapiStoragePoolLookupByUUID
+*
+*Lookup the storage pool by UUID
+*
+*/
+static virStoragePoolPtr
+xenapiStoragePoolLookupByUUID (virConnectPtr conn,
+ const unsigned char * uuid)
+{
+ xen_sr sr = NULL;
+ xen_sr_record *record = NULL;
+ char uuidStr[VIR_UUID_STRING_BUFLEN];
+ virStoragePoolPtr pool = NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(uuid,uuidStr);
+ if (xen_sr_get_by_uuid(session, &sr, uuidStr) && sr) {
+ if (xen_sr_get_record(session, &record, sr)) {
+ pool = virGetStoragePool(conn, record->name_label, uuid);
+ if (!pool) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get storage pool pointer");
+ xen_sr_record_free(record);
+ return pool;
+ }
+ xen_sr_record_free(record);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_sr_free(sr);
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, NULL);
+ }
+ return pool;
+}
+
+
+/*
+*XenapiStoragePoolGetInfo
+*
+*Get information regarding the given storage pool
+*
+*/
+static int
+xenapiStoragePoolGetInfo (virStoragePoolPtr pool,
+ virStoragePoolInfoPtr info)
+{
+ xen_sr_record *record=NULL;
+ xen_sr sr=NULL;
+ virConnectPtr conn = pool->conn;
+ int state = -1;
+ char uuid[VIR_UUID_STRING_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuid);
+ if (xen_sr_get_by_uuid(session, &sr, uuid) && sr) {
+ if (xen_sr_get_record(session, &record, sr)) {
+ info->capacity = record->physical_size;
+ info->allocation = record->virtual_allocation;
+ info->available = record->physical_size - record->physical_utilisation;
+ state = xenapiStoragePoolIsActive(pool);
+ if(state == 1) info->state = VIR_STORAGE_POOL_RUNNING;
+ else if(state == 0) info->state = VIR_STORAGE_POOL_INACTIVE;
+ xen_sr_record_free(record);
+ return 0;
+ } else {
+ xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_POOL, NULL);
+ return -1;
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return -1;
+ }
+}
+
+/*
+*XenapiStoragePoolLookupByVolume
+*
+*Lookup storage pool from the volume given
+*
+*/
+static virStoragePoolPtr
+xenapiStoragePoolLookupByVolume (virStorageVolPtr vol)
+{
+ xen_sr_record *record=NULL;
+ xen_sr_set *sr_set=NULL;
+ xen_sr sr=NULL;
+ virStoragePoolPtr poolPtr=NULL;
+ virConnectPtr conn = vol->conn;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (xen_sr_get_by_name_label(session, &sr_set, vol->pool) && sr_set->size>0) {
+ if (sr_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool name is not unique");
+ xen_sr_set_free(sr_set);
+ return NULL;
+ }
+ sr = sr_set->contents[0];
+ xen_sr_get_record(session, &record, sr);
+ if (record!=NULL) {
+ virUUIDParse(record->uuid,raw_uuid);
+ poolPtr = virGetStoragePool(conn,(const char *)record->name_label, raw_uuid);
+ if (poolPtr != NULL) {
+ xen_sr_record_free(record);
+ xen_sr_set_free(sr_set);
+ return poolPtr;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool pointer unavailable");
+ }
+ xen_sr_record_free(record);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_sr_set_free(sr_set);
+ } else {
+ if (sr_set) {
+ xen_sr_set_free(sr_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Pool not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return NULL;
+}
+
+/*
+*XenapiStorageVolLookupByName
+*
+*Lookup Storage volume by unique name
+*
+*/
+static virStorageVolPtr
+xenapiStorageVolLookupByName (virStoragePoolPtr pool,
+ const char *name)
+{
+ xen_vdi_set *vdi_set=NULL;
+ xen_vdi vdi=NULL;
+ virStorageVolPtr volPtr=NULL;
+ virConnectPtr conn = pool->conn;
+ char *uuid=NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (xen_vdi_get_by_name_label(session, &vdi_set, (char *)name) && vdi_set->size>0) {
+ if (vdi_set->size!=1) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume name is not unique");
+ xen_vdi_set_free(vdi_set);
+ return NULL;
+ }
+ vdi = vdi_set->contents[0];
+ if (xen_vdi_get_uuid(session, &uuid, vdi)) {
+ volPtr = virGetStorageVol(conn, pool->name, name, uuid);
+ if (!volPtr) xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume pointer not available");
+ VIR_FREE(uuid);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't find the Unique key of the Storage Volume specified");
+ }
+ xen_vdi_set_free(vdi_set);
+ } else {
+ if (vdi_set) {
+ xen_vdi_set_free(vdi_set);
+ xenapiSessionErrorHandler(conn, VIR_ERR_NO_STORAGE_VOL, "Storage Volume not found");
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ return volPtr;
+}
+
+/*
+*XenapiStorageVolGetInfo
+*
+*Get information about the given storage volume
+*
+*/
+static int
+xenapiStorageVolGetInfo (virStorageVolPtr vol,
+ virStorageVolInfoPtr info)
+{
+ virConnectPtr conn = vol->conn;
+ xen_vdi vdi=NULL;
+ xen_vdi_record *record=NULL;
+ xen_sr sr=NULL;
+ xen_sr_record *sr_record=NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ int ret=-1;
+ //char uuid[VIR_UUID_STRING_BUFLEN];
+ //virUUIDFormat((unsigned char *)vol->key,uuid);
+
+ if (xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+ if (xen_vdi_get_record(session, &record, vdi)) {
+ info->capacity = record->virtual_size;
+ info->allocation = record->physical_utilisation;
+ if (xen_vdi_get_sr(session, &sr, vdi)) {
+ if (xen_sr_get_record(session, &sr_record, sr)) {
+ info->type = getStorageVolumeType(sr_record->type);
+ xen_sr_record_free(sr_record);
+ ret=0;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_vdi_record_free(record);
+ } else {
+ xen_vdi_free(vdi);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return ret;
+}
+
+static int
+xenapiStoragePoolIsPersistent (virStoragePoolPtr pool ATTRIBUTE_UNUSED)
+{
+ return 1; /* Storage Pool is always persistent */
+}
+
+
+/*
+*XenapiStorageVolGetXMLDesc
+*
+*Get Storage Volume configuration as XML
+*
+*/
+static char *
+xenapiStorageVolGetXMLDesc (virStorageVolPtr vol, unsigned int flags ATTRIBUTE_UNUSED)
+{
+ virBuffer buf = VIR_BUFFER_INITIALIZER;
+ virConnectPtr conn = vol->conn;
+ xen_vdi vdi=NULL;
+ xen_sr sr=NULL;
+ xen_vdi_record *record=NULL;
+ char *sr_uuid =NULL, *srname=NULL, *xml=NULL, *poolXml=NULL;
+ unsigned char raw_uuid[VIR_UUID_BUFLEN];
+ virStorageVolDefPtr vdef=NULL;
+ virStoragePoolDefPtr pdef=NULL;
+ virStoragePoolPtr pool=NULL;
+
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+ if (!xen_vdi_get_record(session, &record, vdi)) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Volume information");
+ xen_vdi_free(vdi);
+ virBufferFreeAndReset(&buf);
+ return NULL;
+ }
+ if (VIR_ALLOC(vdef)<0) {
+ virReportOOMError();
+ virBufferFreeAndReset(&buf);
+ xen_vdi_record_free(record);
+ return NULL;
+ }
+ if (!(vdef->name = strdup(record->name_label))) {
+ virReportOOMError();
+ virBufferFreeAndReset(&buf);
+ xen_vdi_record_free(record);
+ virStorageVolDefFree(vdef);
+ return NULL;
+ }
+ if (!(vdef->key = strdup(record->uuid))) {
+ virReportOOMError();
+ virBufferFreeAndReset(&buf);
+ xen_vdi_record_free(record);
+ virStorageVolDefFree(vdef);
+ return NULL;
+ }
+ vdef->capacity = record->virtual_size;
+ vdef->allocation = record->physical_utilisation;
+
+ if (!(xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_uuid(session, &sr_uuid, sr))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get SR information for the Storage Volume");
+ if (sr) xen_sr_free(sr);
+ xen_vdi_record_free(record);
+ virStorageVolDefFree(vdef);
+ virBufferFreeAndReset(&buf);
+ return NULL;
+ }
+ virBufferVSprintf(&buf, "/%s/%s", sr_uuid, record->uuid);
+ vdef->target.path = virBufferContentAndReset(&buf);
+ xen_sr_get_name_label(session, &srname, sr);
+ xen_sr_free(sr);
+ xen_vdi_record_free(record);
+
+ virUUIDParse(sr_uuid, raw_uuid);
+ if(!(pool = virGetStoragePool(conn, srname, raw_uuid))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get storage pool pointer");
+ VIR_FREE(srname);
+ VIR_FREE(sr_uuid);
+ virStorageVolDefFree(vdef);
+ return NULL;
+ }
+ VIR_FREE(srname);
+ VIR_FREE(sr_uuid);
+ if (!(poolXml = xenapiStoragePoolGetXMLDesc(pool, 0))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't get Storage Pool XML");
+ virStoragePoolFree(pool);
+ virStorageVolDefFree(vdef);
+ return NULL;
+ }
+ virStoragePoolFree(pool);
+ if(!(pdef = virStoragePoolDefParseString(poolXml))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't parse Storage Pool XML");
+ VIR_FREE(poolXml);
+ virStorageVolDefFree(vdef);
+ return NULL;
+ }
+ VIR_FREE(poolXml);
+ if(!(xml = virStorageVolDefFormat(pdef, vdef))) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Couldn't convert Storage Volume info to XML");
+ virStorageVolDefFree(vdef);
+ virStoragePoolDefFree(pdef);
+ return NULL;
+ }
+ virStorageVolDefFree(vdef);
+ virStoragePoolDefFree(pdef);
+ return xml;
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ return NULL;
+ }
+}
+
+/*
+ * xenapiStorageVolLookupByPath
+ *
+ * Look up the storage volume for the given path
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByPath (virConnectPtr conn,
+ const char *path)
+{
+ xen_sr sr=NULL;
+ xen_vdi vdi=NULL;
+ virStorageVolPtr volPtr=NULL;
+ char *srname=NULL,*vname=NULL;
+ char sruuid[VIR_UUID_STRING_BUFLEN]="\0", vuuid[VIR_UUID_STRING_BUFLEN]="\0";
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (sscanf(path, "/%36[^/]/%36[^/]", sruuid, vuuid) != 2) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Invalid path");
+ return NULL;
+ }
+ if (xen_sr_get_by_uuid(session, &sr, sruuid) && xen_sr_get_name_label(session, &srname, sr)) {
+ if (xen_vdi_get_by_uuid(session, &vdi, vuuid) && xen_vdi_get_name_label(session, &vname, vdi)) {
+ if (!(volPtr = virGetStorageVol(conn, srname, vname, vuuid)))
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume pointer not available");
+ VIR_FREE(vname);
+ xen_vdi_free(vdi);
+ } else {
+ if (vdi) xen_vdi_free(vdi);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ VIR_FREE(srname);
+ xen_sr_free(sr);
+ } else {
+ if (sr) xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return volPtr;
+}
+
+/*
+ * xenapiStorageVolGetPath
+ *
+ * Get the path of the specified storage volume
+ */
+static char *
+xenapiStorageVolGetPath (virStorageVolPtr vol)
+{
+ xen_vdi vdi=NULL;
+ virConnectPtr conn = vol->conn;
+ virBuffer path = VIR_BUFFER_INITIALIZER;
+ xen_sr sr=NULL;
+ char *sruuid=NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+
+ if (xen_vdi_get_by_uuid(session, &vdi, vol->key)) {
+ if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_uuid(session, &sruuid, sr)) {
+ virBufferVSprintf(&path,"/%s/%s",sruuid,vol->key);
+ VIR_FREE(sruuid);
+ xen_sr_free(sr);
+ } else {
+ if (sr) xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_vdi_free(vdi);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return virBufferContentAndReset(&path);
+}
+
+static int
+xenapiStoragePoolRefresh ( ATTRIBUTE_UNUSED virStoragePoolPtr pool,
+ ATTRIBUTE_UNUSED unsigned int flags)
+{
+ return 0;
+}
+
+/*
+ * xenapiStorageVolLookupByKey
+ *
+ * Look up the storage volume for the given key
+ */
+static virStorageVolPtr
+xenapiStorageVolLookupByKey (virConnectPtr conn, const char * key)
+{
+ xen_vdi vdi=NULL;
+ xen_sr sr=NULL;
+ xen_vdi_record *vrecord=NULL;
+ xen_sr_record *srecord=NULL;
+ virStorageVolPtr volPtr=NULL;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ if (xen_vdi_get_by_uuid(session, &vdi, (char *)key) && xen_vdi_get_record(session, &vrecord, vdi)) {
+ if (xen_vdi_get_sr(session, &sr, vdi) && xen_sr_get_record(session, &srecord, sr)) {
+ volPtr = virGetStorageVol(conn, srecord->name_label, vrecord->name_label, key);
+ if (!volPtr)
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Storage Volume Pointer not available");
+ xen_sr_record_free(srecord);
+ } else {
+ if (sr) xen_sr_free(sr);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_vdi_record_free(vrecord);
+ } else {
+ if (vdi) xen_vdi_free(vdi);
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return volPtr;
+}
+
+
+/*
+ * xenapiStoragePoolDestroy
+ *
+ * Unplug the PBDs connected to the specified storage pool
+ */
+static int
+xenapiStoragePoolDestroy (virStoragePoolPtr pool)
+{
+ xen_sr sr=NULL;
+ xen_pbd pbd=NULL;
+ char uuidStr[VIR_UUID_STRING_BUFLEN];
+ struct xen_pbd_set *pbd_set=NULL;
+ int i,ret=-1;
+ virConnectPtr conn = pool->conn;
+ xen_session *session = ((struct _xenapiStoragePrivate *)(conn->storagePrivateData))->session;
+ virUUIDFormat(pool->uuid,uuidStr);
+ if (xen_sr_get_by_uuid(session, &sr, uuidStr)) {
+ if (xen_sr_get_pbds(session, &pbd_set, sr) && pbd_set->size>0) {
+ for (i=0;i<pbd_set->size;i++) {
+ pbd = pbd_set->contents[i];
+ if (xen_pbd_unplug(session, pbd))
+ ret=0;
+ else
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ xen_pbd_set_free(pbd_set);
+ } else {
+ if (pbd_set) {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "There are no PBDs in the specified pool to unplug");
+ xen_pbd_set_free(pbd_set);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ }
+ xen_sr_free(sr);
+ } else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ }
+ return ret;
+}
+
+static virStorageDriver xenapiStorageDriver = {
+ "XenAPI Storage",
+ xenapiStorageOpen,
+ xenapiStorageClose,
+ xenapiNumOfStoragePools,
+ xenapiListStoragePools,
+ xenapiNumOfDefinedStoragePools,
+ xenapiListDefinedStoragePools,
+ NULL,
+ xenapiStoragePoolLookupByName,
+ xenapiStoragePoolLookupByUUID,
+ xenapiStoragePoolLookupByVolume,
+ xenapiStoragePoolCreateXML,
+ NULL,
+ xenapiStoragePoolBuild,
+ NULL,
+ xenapiStoragePoolCreate,
+ xenapiStoragePoolDestroy,
+ NULL,
+ xenapiStoragePoolRefresh,
+ xenapiStoragePoolGetInfo,
+ xenapiStoragePoolGetXMLDesc,
+ xenapiStoragePoolGetAutostart,
+ xenapiStoragePoolSetAutostart,
+ xenapiStoragePoolNumOfVolumes,
+ xenapiStoragePoolListVolumes,
+ xenapiStorageVolLookupByName,
+ xenapiStorageVolLookupByKey,
+ xenapiStorageVolLookupByPath,
+ NULL,
+ NULL,
+ NULL,
+ xenapiStorageVolGetInfo,
+ xenapiStorageVolGetXMLDesc,
+ xenapiStorageVolGetPath,
+ xenapiStoragePoolIsActive,
+ xenapiStoragePoolIsPersistent
+};
+
+
+/*
+ * xenapiStorageRegister
+ *
+ * Register the XenAPI storage driver
+ */
+int
+xenapiStorageRegister (void)
+{
+ return virRegisterStorageDriver(&xenapiStorageDriver);
+}
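A note for reviewers: volume paths in this driver are synthesised as "/<SR-uuid>/<VDI-uuid>", so xenapiStorageVolGetPath(), xenapiStorageVolLookupByPath() and xenapiStorageVolGetXMLDesc() all agree on the same identifier. The volume XML produced above should therefore look roughly like the sketch below (all names and UUIDs are made up, and the exact element set is whatever virStorageVolDefFormat() emits):

  <volume>
    <name>guest-disk-0</name>
    <key>b1b6fb8c-1111-2222-3333-444444444444</key>
    <capacity>8589934592</capacity>
    <allocation>4194304</allocation>
    <target>
      <path>/0483b99c-aaaa-bbbb-cccc-dddddddddddd/b1b6fb8c-1111-2222-3333-444444444444</path>
    </target>
  </volume>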
--- ./libvirt_org/src/xenapi/xenapi_storage_driver.h 1970-01-01 01:00:00.000000000 +0100
+++ ./libvirt/src/xenapi/xenapi_storage_driver.h 2010-03-11 12:46:00.000000000 +0000
@@ -0,0 +1,42 @@
+/*
+ * xenapi_storage_driver.h: Xen API Storage Driver header file
+ * Copyright (C) 2009, 2010 Citrix Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Sharadha Prabhakar <sharadha.prabhakar(a)citrix.com>
+ */
+
+
+#ifndef __VIR_XENAPI_STORAGE_H__
+#define __VIR_XENAPI_STORAGE_H__
+
+#include <xen/api/xen_common.h>
+#include <libxml/tree.h>
+
+
+
+/* XenAPI storage driver's private data structure */
+struct _xenapiStoragePrivate {
+ xen_session *session;
+ char *url;
+ int noVerify;
+ virCapsPtr caps;
+};
+
+#endif /* __VIR_XENAPI_STORAGE_H__ */
--- ./src/xenapi/xenapi_utils.h_orig 2010-03-24 15:38:59.000000000 +0000
+++ ./src/xenapi/xenapi_utils.h 2010-03-23 10:44:38.000000000 +0000
@@ -56,8 +56,12 @@
#include "buf.h"
#define NETWORK_DEVID_SIZE (12)
+#define STORAGE_DEVID_SIZE (12)
typedef uint64_t cpumap_t;
+/* Create a VBD attaching the storage volume at 'path' to the given VM */
+int
+createVbdStorage (virConnectPtr conn, xen_vm vm, int device, char *path, int devtype);
void
xenSessionFree(xen_session *session);
--- ./src/xenapi/xenapi_utils.c_orig 2010-03-24 15:32:28.000000000 +0000
+++ ./src/xenapi/xenapi_utils.c 2010-03-24 15:09:41.000000000 +0000
@@ -53,6 +53,7 @@
#include "xenapi_utils.h"
#include "util/logging.h"
#include "qparams.h"
+#include "xenapi_storage_driver.h"
void
xenSessionFree(xen_session *session)
@@ -390,17 +391,96 @@
const char *buf, const char *filename, const char *func, size_t lineno)
{
struct _xenapiPrivate *priv = conn->privateData;
-
- if (buf == NULL && priv != NULL && priv->session != NULL) {
- char *ret = returnErrorFromSession(priv->session);
- virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
- xen_session_clear_error(priv->session);
- VIR_FREE(ret);
+ struct _xenapiStoragePrivate *privS = conn->storagePrivateData;
+ char *ret = NULL;
+ if (buf == NULL) {
+ if (priv != NULL && priv->session != NULL) {
+ if (!priv->session->ok) {
+ ret = returnErrorFromSession(priv->session);
+ virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
+ xen_session_clear_error(priv->session);
+ VIR_FREE(ret);
+ }
+ }
+ if (privS != NULL && privS->session !=NULL) {
+ if (!privS->session->ok) {
+ ret = returnErrorFromSession(privS->session);
+ virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), ret);
+ xen_session_clear_error(privS->session);
+ VIR_FREE(ret);
+ }
+ }
} else {
virReportErrorHelper(conn, VIR_FROM_XENAPI, errNum, filename, func, lineno, _("%s"), buf);
}
}
+/* Create a VBD attaching the storage volume at "/<SR-uuid>/<VDI-uuid>" to the given VM */
+int
+createVbdStorage (virConnectPtr conn, xen_vm vm, int device, char *path, int devtype)
+{
+ xen_vm xvm=NULL;
+ xen_vdi vdi=NULL;
+ xen_vbd vbd=NULL;
+ char *vmuuid=NULL;
+ char userdevice[STORAGE_DEVID_SIZE]="\0";
+ xen_vbd_record *record=NULL;
+ xen_session *session = ((struct _xenapiPrivate *)(conn->privateData))->session;
+ char sruuid[VIR_UUID_STRING_BUFLEN]="\0", voluuid[VIR_UUID_STRING_BUFLEN]="\0";
+ if (sscanf(path, "/%36[^/]/%36[^/]", sruuid, voluuid) != 2)
+ return -1;
+ if (!xen_vm_get_uuid(session, &vmuuid, vm))
+ return -1;
+ if (!xen_vm_get_by_uuid(session, &xvm, vmuuid)){
+ VIR_FREE(vmuuid);
+ return -1;
+ }
+ VIR_FREE(vmuuid);
+ if (!xen_vdi_get_by_uuid(session, &vdi, voluuid)) {
+ xen_vm_free(xvm);
+ return -1;
+ }
+ snprintf(userdevice, sizeof(userdevice), "%d", device);
+ xen_vm_record_opt *vm_opt = xen_vm_record_opt_alloc();
+ vm_opt->is_record = 0;
+ vm_opt->u.handle = xvm;
+
+ xen_vdi_record_opt *vdi_opt = xen_vdi_record_opt_alloc();
+ vdi_opt->is_record = 0;
+ vdi_opt->u.handle = vdi;
+
+ record = xen_vbd_record_alloc();
+ record->vm = vm_opt;
+ record->vdi = vdi_opt;
+ if (!(record->userdevice = strdup(userdevice))) {
+ xen_vbd_record_free(record);
+ return -1;
+ }
+ record->other_config = xen_string_string_map_alloc(0);
+ record->runtime_properties = xen_string_string_map_alloc(0);
+ record->qos_algorithm_params = xen_string_string_map_alloc(0);
+ if (devtype == VIR_DOMAIN_DISK_DEVICE_DISK)
+ record->type = XEN_VBD_TYPE_DISK;
+ else if (devtype == VIR_DOMAIN_DISK_DEVICE_CDROM)
+ record->type = XEN_VBD_TYPE_CD;
+ else {
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, "Only CDROM and HardDisk supported");
+ xen_vbd_record_free(record);
+ return -1;
+ }
+ if (!xen_vbd_create(session, &vbd, record)){
+ xenapiSessionErrorHandler(conn, VIR_ERR_INTERNAL_ERROR, NULL);
+ xen_vbd_record_free(record);
+ return -1;
+ }
+ xen_vbd_record_free(record);
+
+ return 0;
+}
+
+
+
/* creates network intereface for VM */
int
createVifNetwork (virConnectPtr conn, xen_vm vm, char *device,
@@ -557,6 +637,7 @@
int device_number=0;
char *bridge=NULL,*mac=NULL;
int i;
+ /* create network interfaces for the domain */
for (i=0;i<def->nnets;i++) {
if (def->nets[i]->type == VIR_DOMAIN_NET_TYPE_BRIDGE) {
if (def->nets[i]->data.bridge.brname)
@@ -580,6 +661,13 @@
if (bridge) VIR_FREE(bridge);
}
}
+ /* attach the domain's disks as VBDs */
+ for (i=0;i<def->ndisks;i++) {
+ if (createVbdStorage(conn, *vm, i, def->disks[i]->src, def->disks[i]->device)!= 0) {
+ xen_vm_record_free(*record);
+ return -1;
+ }
+ }
return 0;
error_cleanup:
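Since the new loop above passes def->disks[i]->src straight into createVbdStorage(), a guest disk would presumably be written with the same "/<SR-uuid>/<VDI-uuid>" path as its source, along these lines (illustrative values only, not taken from the patch):

  <disk type='file' device='disk'>
    <source file='/0483b99c-aaaa-bbbb-cccc-dddddddddddd/b1b6fb8c-1111-2222-3333-444444444444'/>
    <target dev='xvda' bus='xen'/>
  </disk>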
--- ../libvirt_org/src/conf/storage_conf.c 2010-02-17 17:38:05.000000000 +0000
+++ ./src/conf/storage_conf.c 2010-03-22 15:08:36.000000000 +0000
@@ -61,7 +61,7 @@
VIR_ENUM_IMPL(virStoragePoolFormatFileSystemNet,
VIR_STORAGE_POOL_NETFS_LAST,
- "auto", "nfs", "glusterfs")
+ "auto", "nfs", "nfs-iso", "cifs-iso", "glusterfs")
VIR_ENUM_IMPL(virStoragePoolFormatDisk,
VIR_STORAGE_POOL_DISK_LAST,
--- ../libvirt_org/src/conf/storage_conf.h 2010-02-17 17:38:06.000000000 +0000
+++ ./src/conf/storage_conf.h 2010-03-22 14:01:02.000000000 +0000
@@ -404,6 +404,8 @@
enum virStoragePoolFormatFileSystemNet {
VIR_STORAGE_POOL_NETFS_AUTO = 0,
VIR_STORAGE_POOL_NETFS_NFS,
+ VIR_STORAGE_POOL_NETFS_NFS_ISO,
+ VIR_STORAGE_POOL_NETFS_CIFS_ISO,
VIR_STORAGE_POOL_NETFS_GLUSTERFS,
VIR_STORAGE_POOL_NETFS_LAST,
};
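With the two new netfs formats, an ISO library SR could presumably be described along these lines (host, export path and target path are placeholders, not values used by the patch):

  <pool type='netfs'>
    <name>iso-library</name>
    <source>
      <host name='nfs.example.com'/>
      <dir path='/export/isos'/>
      <format type='nfs-iso'/>
    </source>
    <target>
      <path>/var/lib/libvirt/images/iso-library</path>
    </target>
  </pool>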
--- ./src/Makefile.am_04mar 2010-03-05 10:55:04.000000000 +0000
+++ ./src/Makefile.am 2010-03-23 18:11:50.000000000 +0000
@@ -210,7 +211,9 @@
XENAPI_DRIVER_SOURCES = \
xenapi/xenapi_driver.c xenapi/xenapi_driver.h \
xenapi_driver_private.h \
- xenapi/xenapi_utils.c xenapi/xenapi_utils.h
+ xenapi/xenapi_utils.c xenapi/xenapi_utils.h \
+ xenapi/xenapi_storage_driver.c \
+ xenapi/xenapi_storage_driver.h
UML_DRIVER_SOURCES = \
uml/uml_conf.c uml/uml_conf.h \
--- ../libvirt_org/src/libvirt.c 2010-02-17 17:38:08.000000000 +0000
+++ ./src/libvirt.c 2010-03-11 12:14:33.000000000 +0000
@@ -377,6 +381,10 @@
#ifdef WITH_ESX
if (esxRegister() == -1) return -1;
#endif
#ifdef WITH_XENAPI
if (xenapiRegister () == -1) return -1;
+ if (xenapiStorageRegister () == -1) return -1;
#endif
#ifdef WITH_REMOTE
if (remoteRegister () == -1) return -1;
#endif
--- ./src/xenapi/xenapi_driver.h_orig 2010-03-23 19:00:14.000000000 +0000
+++ ./src/xenapi/xenapi_driver.h 2010-03-11 11:11:01.000000000 +0000
@@ -25,5 +25,6 @@
extern int xenapiRegister (void);
+extern int xenapiStorageRegister (void);
#endif /* __VIR_XENAPI_PRIV_H__ */
14 years, 8 months