On 09/09/2012 04:44 AM, Matthias Bolte wrote:
---
v2: Use network level VLAN config if there is no portgroup specific VLAN
config given.
v3: Add ESX_VLAN_TRUNK define for magic number 4095
src/esx/esx_network_driver.c | 70 +++++++++++++++++++++++++++++++++++++++---
1 files changed, 65 insertions(+), 5 deletions(-)
diff --git a/src/esx/esx_network_driver.c b/src/esx/esx_network_driver.c
index 09d46d3..21eabbe 100644
--- a/src/esx/esx_network_driver.c
+++ b/src/esx/esx_network_driver.c
@@ -45,6 +45,9 @@
*/
verify(MD5_DIGEST_SIZE == VIR_UUID_BUFLEN);
+/* ESX utilizes the VLAN ID of 4095 to mean that this port is in trunk mode */
+#define ESX_VLAN_TRUNK 4095
+
static virDrvOpenStatus
@@ -489,7 +492,42 @@ esxNetworkDefineXML(virConnectPtr conn, const char *xml)
goto cleanup;
}
- hostPortGroupSpec->vlanId->value = 0;
+ if (def->portGroups[i].vlan.trunk) {
+ /* FIXME: Change this once tag-less trunk-mode is supported */
I've lost the context (if there was any) of this comment. Is this
something that libvirt needs to support? Or ESX?
+ if (def->portGroups[i].vlan.nTags != 1 ||
+ *def->portGroups[i].vlan.tag != ESX_VLAN_TRUNK) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("VLAN tag has to be %d in trunk mode"),
+ ESX_VLAN_TRUNK);
This seems a bit odd. If ESX requires setting the vlan tag to 4095 to
indicate trunk mode, why not just have trunk='yes' imply a tag id of
4095? While not exactly the same as trunk mode for Open vSwitch, it
seems closer at least. (hmm, of course I think currently the parser
requires at least one tag id, even for trunk='yes'. Maybe that should be
relaxed in the parser, and enforced in each driver as necessary? Is that
what the comment above is referring to? If so, I would say "go for it" :-))
+ goto cleanup;
+ }
+
+ hostPortGroupSpec->vlanId->value = ESX_VLAN_TRUNK;
+ } else if (def->portGroups[i].vlan.nTags == 1) {
+ hostPortGroupSpec->vlanId->value = *def->portGroups[i].vlan.tag;
+ } else if (def->portGroups[i].vlan.nTags > 1) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Can apply one VLAN tag per port group only"));
+ goto cleanup;
+ } else if (def->vlan.trunk) {
+ /* FIXME: Change this once tag-less trunk-mode is supported */
+ if (def->vlan.nTags != 1 || *def->vlan.tag !=
ESX_VLAN_TRUNK) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+ _("VLAN tag has to be %d in trunk mode"),
+ ESX_VLAN_TRUNK);
+ goto cleanup;
+ }
+
+ hostPortGroupSpec->vlanId->value = ESX_VLAN_TRUNK;
+ } else if (def->vlan.nTags == 1) {
+ hostPortGroupSpec->vlanId->value = *def->vlan.tag;
+ } else if (def->vlan.nTags > 1) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("Can apply one VLAN tag per port group only"));
+ goto cleanup;
+ } else {
+ hostPortGroupSpec->vlanId->value = 0;
+ }
if (def->portGroups[i].bandwidth != NULL) {
if (esxBandwidthToShapingPolicy
@@ -519,6 +557,8 @@ esxNetworkDefineXML(virConnectPtr conn, const char *xml)
network = virGetNetwork(conn, hostVirtualSwitch->name, md5);
cleanup:
+ /* FIXME: need to remove virtual switch if adding port groups failed */
+
virNetworkDefFree(def);
esxVI_HostVirtualSwitch_Free(&hostVirtualSwitch);
esxVI_HostPortGroup_Free(&hostPortGroupList);
@@ -695,6 +735,7 @@ esxNetworkGetXMLDesc(virNetworkPtr network_, unsigned int flags)
esxVI_String *hostPortGroupKey = NULL;
esxVI_String *networkName = NULL;
virNetworkDefPtr def;
+ virPortGroupDefPtr portGroup;
if (esxVI_EnsureSession(priv->primary) < 0) {
return NULL;
@@ -824,9 +865,12 @@ esxNetworkGetXMLDesc(virNetworkPtr network_, unsigned int flags)
for (networkName = networkNameList; networkName != NULL;
networkName = networkName->_next) {
if (STREQ(networkName->value,
hostPortGroup->spec->name)) {
- def->portGroups[def->nPortGroups].name =
strdup(networkName->value);
+ portGroup = &def->portGroups[def->nPortGroups];
+ ++def->nPortGroups;
+
+ portGroup->name = strdup(networkName->value);
- if (def->portGroups[def->nPortGroups].name == NULL) {
+ if (portGroup->name == NULL) {
virReportOOMError();
goto cleanup;
}
@@ -834,13 +878,29 @@ esxNetworkGetXMLDesc(virNetworkPtr network_, unsigned int flags)
if (hostPortGroup->spec->policy != NULL) {
if (esxShapingPolicyToBandwidth
(hostPortGroup->spec->policy->shapingPolicy,
-
&def->portGroups[def->nPortGroups].bandwidth) < 0) {
+ &portGroup->bandwidth) < 0) {
++def->nPortGroups;
goto cleanup;
}
}
- ++def->nPortGroups;
+ if (hostPortGroup->spec->vlanId->value > 0) {
+ if (hostPortGroup->spec->vlanId->value ==
ESX_VLAN_TRUNK) {
+ portGroup->vlan.trunk = true;
+ }
+
+ /* FIXME: Remove this once tag-less trunk-mode is
supported */
+ portGroup->vlan.nTags = 1;
+
+ if (VIR_ALLOC_N(portGroup->vlan.tag, 1) < 0) {
+ virReportOOMError();
+ goto cleanup;
+ }
+
+ *portGroup->vlan.tag =
+ hostPortGroup->spec->vlanId->value;
+ }
+
break;
}
}
It all looks fine to me (although I can't check it myself), but I think
that if there's a simple way to automatically set the tag id sent to
ESX to ESX_VLAN_TRUNK just by setting trunk='yes', it would be better
to do that now than later, saving the backward compatibility headaches.
(It's okay with me if the parser is changed to allow 0 tags when
trunk='yes', as long as Open vSwitch's (and hostdev's) use of vlan is
updated to disallow 0 tags even for trunk='yes' — if they don't already;
I haven't checked.)
If I'm misunderstanding the issue and this can't be easily done, then
ACK as-is.