[PATCH] Add check to catch duplicated VirtualDevice parameter in DiskRASDs
by Richard Maciel
# HG changeset patch
# User Richard Maciel <rmaciel(a)linux.vnet.ibm.com>
# Date 1248800674 10800
# Node ID 4ff0f15857f88d54dac54c149da08f0d0b1052f1
# Parent b4da93f35181591b2a225c9e8377a426312270b7
Add check to catch duplicated VirtualDevice parameter in DiskRASDs
When a guest is created, we must check that all of its DiskRASDs have mutually unique VirtualDevice parameters.
Signed-off-by: Richard Maciel <rmaciel(a)linux.vnet.ibm.com>
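To make the rule concrete, here is a minimal Python sketch of the same uniqueness check done on the client side (a hypothetical helper, not the provider's actual C code): it flags a definition in which two DiskRASDs reuse the same VirtualDevice value, which is exactly what the check below rejects.

    def has_duplicate_virtual_device(disk_rasds):
        # disk_rasds: list of dict-like DiskRASD instances, each assumed
        # to carry a 'VirtualDevice' key
        seen = set()
        for rasd in disk_rasds:
            vdev = rasd.get('VirtualDevice')
            if vdev in seen:
                return True   # the provider would now refuse this definition
            seen.add(vdev)
        return False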
diff -r b4da93f35181 -r 4ff0f15857f8 src/Virt_VirtualSystemManagementService.c
--- a/src/Virt_VirtualSystemManagementService.c Tue Jul 28 06:10:38 2009 -0300
+++ b/src/Virt_VirtualSystemManagementService.c Tue Jul 28 14:04:34 2009 -0300
@@ -1011,6 +1011,11 @@
for (i = 0; i < *index; i++) {
struct virt_device *ptr = &list[i];
+
+ if (STREQC(ptr->dev.disk.virtual_dev, dev->dev.disk.virtual_dev))
+ return "VirtualDevice property must be unique for each "
+ "DiskResourceAllocationSettingData in a single "
+ "guest";
if (STREQC(ptr->id, dev->id)) {
CU_DEBUG("Overriding device %s from refconf", ptr->id);
[PATCH] VirtualDevice default value for a cdrom template rasd changed from hda to hdc
by Richard Maciel
# HG changeset patch
# User Richard Maciel <rmaciel(a)linux.vnet.ibm.com>
# Date 1248772238 10800
# Node ID b4da93f35181591b2a225c9e8377a426312270b7
# Parent bdf6eda6765f09c27f7306cd8c074ca5acda38a6
VirtualDevice default value for a cdrom template rasd changed from hda to hdc
Signed-off-by: Richard Maciel <rmaciel(a)linux.vnet.ibm.com>
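After this change the default device selection works roughly as follows (a hedged Python rendering of the C flow in the hunks below; the function name and domain-type labels are illustrative only):

    def default_virtual_device(domain_type, emu_type):
        # emu_type: 0 = disk template, 1 = cdrom template (per the patch below)
        if domain_type == "LXC":
            return "/lxc_mnt/tmp"   # containers expose this as MountPoint instead
        dev = "hda"                 # fully virtualized disk default
        if domain_type == "XENPV":
            dev = "xvda"
        if emu_type == 1:
            dev = "hdc"             # the cdrom template now defaults to hdc
        return dev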
diff -r bdf6eda6765f -r b4da93f35181 src/Virt_SettingsDefineCapabilities.c
--- a/src/Virt_SettingsDefineCapabilities.c Thu Jul 30 15:59:26 2009 -0700
+++ b/src/Virt_SettingsDefineCapabilities.c Tue Jul 28 06:10:38 2009 -0300
@@ -782,13 +782,6 @@
CMPIInstance *inst;
CMPIStatus s = {CMPI_RC_OK, NULL};
- if (type == DOMAIN_LXC) {
- dev = "/lxc_mnt/tmp";
- }
- else {
- dev = "hda";
- }
-
inst = sdc_rasd_inst(&s, ref, CIM_RES_TYPE_DISK, DEVICE_RASD);
if ((inst == NULL) || (s.rc != CMPI_RC_OK))
goto out;
@@ -798,12 +791,11 @@
(CMPIValue *)"MegaBytes", CMPI_chars);
CMSetProperty(inst, "Address", (CMPIValue *)disk_path, CMPI_chars);
- if (type == DOMAIN_LXC)
+ if (type == DOMAIN_LXC) {
+ dev = "/lxc_mnt/tmp";
CMSetProperty(inst, "MountPoint", (CMPIValue *)dev, CMPI_chars);
- else {
- if (emu_type == 0)
- CMSetProperty(inst, "VirtualQuantity",
- (CMPIValue *)&disk_size, CMPI_uint64);
+ } else {
+ dev = "hda";
if (type == DOMAIN_XENPV) {
dev = "xvda";
@@ -813,6 +805,13 @@
CMSetProperty(inst, "Caption",
(CMPIValue *)"FV disk", CMPI_chars);
}
+
+ if (emu_type == 0) {
+ CMSetProperty(inst, "VirtualQuantity",
+ (CMPIValue *)&disk_size, CMPI_uint64);
+ } else if (emu_type == 1) {
+ dev = "hdc";
+ }
CMSetProperty(inst, "VirtualDevice",
(CMPIValue *)dev, CMPI_chars);
[PATCH] [TEST] Added scsi pool support
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B.Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1248702884 25200
# Node ID 78496dff7dce6045c687d66bd60a066dcf78aca7
# Parent ad67e5d20ee2a268c8f2016004c35bbb890ae94c
[TEST] Added scsi pool support
Tested with current sources on F10 and KVM.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
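A quick way to find a valid value for the new -a option is to list the HBAs the test itself checks under /sys/class/scsi_host (a small sketch; run it on the machine under test):

    import os

    print(os.listdir("/sys/class/scsi_host/"))   # e.g. ['host0', 'host1', 'host2']

    # Then invoke the test with one of the listed adapters, as in the usage
    # comment added below:
    #   python create_verify_storagepool.py -t scsi -v KVM -u <username> \
    #          -p <passwd> -n myscsi_pool -a host2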
diff -r ad67e5d20ee2 -r 78496dff7dce suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py
--- a/suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py Thu Jul 23 06:29:14 2009 -0700
+++ b/suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py Mon Jul 27 06:54:44 2009 -0700
@@ -36,9 +36,15 @@
# python create_verify_storagepool.py -t logical -d /dev/VolGroup01
# -n VolGroup01 -v Xen -u <username> -p <passwd>
#
+# For scsi pool type with HBA's:
+# ------------------------------
+# python create_verify_storagepool.py -t scsi -v KVM -u <username> -p <passwd>
+# -n myscsi_pool -a host2
+#
# Where t can be :
# 2 - FileSystem
-# 4 - Logical etc
+# 6 - Logical
+# 7 - scsi
#
#
# Date : 27.06.2009
@@ -63,16 +69,15 @@
TEST_LOG="cimtest.log"
libvirt_cim_fs_changes = 857
libvirt_cim_logical_changes = 906
+libvirt_cim_scsi_changes = 921
supp_types = [ 'Xen', 'KVM' , 'LXC' ]
-pool_types = { 'DISK_POOL_FS' : 2 , 'DISK_POOL_LOGICAL' : 6 }
+pool_types = { 'DISK_POOL_FS' : 2 , 'DISK_POOL_LOGICAL' : 6 ,
+ 'DISK_POOL_SCSI' : 7 }
def verify_cmd_options(options, parser):
try:
- if options.part_dev == None:
- raise Exception("Free Partition to be mounted not specified")
-
if options.pool_name == None:
raise Exception("Must specify the Pool Name to be created")
@@ -82,9 +87,15 @@
if options.pool_type == None:
raise Exception("Must specify pool type to be tested")
- if options.mnt_pt == None and options.pool_type != 'logical':
+ if options.part_dev == None and options.pool_type != 'scsi':
+ raise Exception("Free Partition to be mounted not specified")
+
+ if options.mnt_pt == None and options.pool_type == 'fs':
raise Exception("Mount points to be used not specified")
+ if options.adap_name == None and options.pool_type == 'scsi':
+ raise Exception("Adapter name used not specified")
+
except Exception, details:
print "\nFATAL: ", details , "\n"
print parser.print_help()
@@ -114,15 +125,24 @@
pool_type = pool_types['DISK_POOL_FS']
elif pooltype == "logical":
pool_type = pool_types['DISK_POOL_LOGICAL']
+ elif pooltype == "scsi":
+ pool_type = pool_types['DISK_POOL_SCSI']
else:
logger.error("Invalid pool type ....")
return None, None
return PASS, pool_type
-def verify_inputs(part_dev, mount_pt, pool_type, pool_name):
+def verify_inputs(part_dev, mount_pt, pool_type, pool_name, adap_name):
del_dir = False
+ if pool_type == pool_types['DISK_POOL_SCSI']:
+ hba_path = "/sys/class/scsi_host/"
+ adap_path = "%s%s" % (hba_path, adap_name)
+ if not os.path.exists(adap_path):
+ logger.error("HBA '%s' does not exist on the machine, specify "\
+ "one present in '%s' path", adap_path, hba_path)
+ return FAIL, del_dir
- if pool_type == pool_types['DISK_POOL_LOGICAL']:
+ elif pool_type == pool_types['DISK_POOL_LOGICAL']:
if not os.path.exists("/sbin/lvm"):
logger.error("LVM support does not exist on the machine")
return FAIL, del_dir
@@ -142,47 +162,48 @@
return FAIL, del_dir
return PASS, del_dir
+ elif pool_type == pool_types['DISK_POOL_FS']:
+ cmd = "mount"
+ status, mount_info = getstatusoutput(cmd)
+ if status != PASS:
+ logger.error("Failed to get mount info.. ")
+ return FAIL, del_dir
+
+ for line in mount_info.split('\n'):
+ try:
+ # Check if the specified partition is mounted before using it
+ part_name = line.split()[0]
+ if part_dev == part_name:
+ logger.error("[%s] already mounted", part_dev)
+ raise Exception("Please specify free partition other than "\
+ "[%s]" % part_dev)
- cmd = "mount"
- status, mount_info = getstatusoutput(cmd)
- if status != PASS:
- logger.error("Failed to get mount info.. ")
- return FAIL, del_dir
-
- for line in mount_info.split('\n'):
- try:
- # Check if the specified partition is mounted before using it
- part_name = line.split()[0]
- if part_dev == part_name:
- logger.error("[%s] already mounted", part_dev)
- raise Exception("Please specify free partition other than " \
- "[%s]" % part_dev)
+ # Check if mount point is already used for mounting
+ mount_name = line.split()[2]
+ if mount_pt == mount_name:
+ logger.error("[%s] already mounted", mount_pt)
+ raise Exception("Please specify dir other than [%s]" \
+ % mount_pt)
- # Check if mount point is already used for mounting
- mount_name = line.split()[2]
- if mount_pt == mount_name:
- logger.error("[%s] already mounted", mount_pt)
- raise Exception("Please specify dir other than [%s]" %mount_pt)
+ except Exception, details:
+ logger.error("%s", details)
+ return FAIL, del_dir
- except Exception, details:
- logger.error("%s", details)
- return FAIL, del_dir
+ # Check if the mount point specified already exist, if not then create it..
+ if not os.path.exists(mount_pt):
+ os.mkdir(mount_pt)
- # Check if the mount point specified already exist, if not then create it..
- if not os.path.exists(mount_pt):
- os.mkdir(mount_pt)
+ # set del_dir to True so that we remove it before exiting from the tc.
+ del_dir = True
+ else:
+ # Check if the mount point specified is a dir
+ if not os.path.isdir(mount_pt):
+ logger.error("The mount point [%s] should be a dir", mount_pt)
+ return FAIL, del_dir
- # set del_dir to True so that we remove it before exiting from the tc.
- del_dir = True
- else:
- # Check if the mount point specified is a dir
- if not os.path.isdir(mount_pt):
- logger.error("The mount point [%s] should be a dir", mount_pt)
- return FAIL, del_dir
-
- files = os.listdir(mount_pt)
- if len(files) != 0:
- logger.info("The mount point [%s] given is not empty", mount_pt)
+ files = os.listdir(mount_pt)
+ if len(files) != 0:
+ logger.info("The mount point [%s] given is not empty", mount_pt)
return PASS, del_dir
@@ -195,7 +216,8 @@
vuri = 'lxc:///system'
return vuri
-def get_pool_settings(dp_rasds, pooltype, part_dev, mount_pt, pool_name):
+def get_pool_settings(dp_rasds, pooltype, part_dev, mount_pt,
+ pool_name, adap_name):
pool_settings = None
for dpool_rasd in dp_rasds:
if dpool_rasd['Type'] == pooltype and \
@@ -207,6 +229,9 @@
dpool_rasd['DevicePaths'] = [part_dev]
elif pooltype == pool_types['DISK_POOL_LOGICAL']:
dpool_rasd['Path'] = part_dev
+ if pooltype == pool_types['DISK_POOL_SCSI']:
+ dpool_rasd['AdapterName'] = adap_name
+ dpool_rasd['Path'] = "/dev/disk/by-id"
break
if not pool_name in dpool_rasd['InstanceID']:
@@ -270,7 +295,7 @@
parser.add_option("-v", "--virt-type", dest="virt", default=None,
help="Virtualization type [ Xen | KVM ]")
parser.add_option("-t", "--pool-type", dest="pool_type", default=None,
- help="Pool type:[ fs | logical ]")
+ help="Pool type:[ fs | logical | scsi ]")
parser.add_option("-d", "--part-dev", dest="part_dev", default=None,
help="specify the free partition to be used for " \
"fs pool type or the predefined Vol Group" \
@@ -279,6 +304,9 @@
help="Mount point to be used")
parser.add_option("-n", "--pool-name", dest="pool_name", default=None,
help="Pool to be created")
+ parser.add_option("-a", "--adap_name", dest="adap_name", default=None,
+ help="Adap name to be used Ex: specify one of the host" \
+ "in /sys/class/scsi_host/ like host0")
parser.add_option("-c", "--clean-log", action="store_true", dest="clean",
help="Will remove existing log files before test run")
parser.add_option("-l", "--debug-output", action="store_true", dest="debug",
@@ -294,6 +322,7 @@
part_dev = options.part_dev
mount_pt = options.mnt_pt
pool_name = options.pool_name
+ adap_name = options.adap_name
virt = options.virt
if ":" in options.h_url:
@@ -301,7 +330,7 @@
else:
sysname = options.h_url
- # Verify if the CIMOM is running, clean cimtest.log if requested
+ # Verify if the CIMOM is running, if requested clean cimtest.log.
# Set Debug option if requested
status = env_setup(sysname, virt, options.clean, options.debug)
if status != PASS:
@@ -322,20 +351,27 @@
curr_cim_rev, changeset = get_provider_version(virt, sysname)
if curr_cim_rev < libvirt_cim_fs_changes and \
pooltype == pool_types['DISK_POOL_FS']:
- logger.info("Test Skipped for %s pool type, Support for File System "\
- "Pool is available in revision %s", options.pool_type,
+ logger.info("Test Skipped for '%s' pool type, Support for File System "
+ "Pool is available in revision '%s'", options.pool_type,
libvirt_cim_fs_changes)
return SKIP
elif curr_cim_rev < libvirt_cim_logical_changes and \
pooltype == pool_types['DISK_POOL_LOGICAL']:
- logger.info("Test Skipped for %s pool type, Support for Logical Pool" \
- " is available in revision %s", options.pool_type, \
+ logger.info("Test Skipped for '%s' pool type, Support for Logical Pool"
+ " is available in revision '%s'", options.pool_type,
libvirt_cim_logical_changes)
return SKIP
+ elif curr_cim_rev < libvirt_cim_scsi_changes and \
+ pooltype == pool_types['DISK_POOL_SCSI']:
+ logger.info("Test Skipped for '%s' pool type, Support for scsi Pool"
+ " is available in revision '%s'", options.pool_type,
+ libvirt_cim_scsi_changes)
+ return SKIP
pooltype = cim_types.Uint16(pooltype)
- status, del_dir = verify_inputs(part_dev, mount_pt, pooltype, pool_name)
+ status, del_dir = verify_inputs(part_dev, mount_pt, pooltype, pool_name,
+ adap_name)
if status != PASS:
if del_dir == True:
cmd ="rm -rf %s" % mount_pt
@@ -370,7 +406,7 @@
# Get the DiskPoolRASD mof with appropriate values of diskpool
# to be created....
pool_settings = get_pool_settings(dp_rasds, pooltype, part_dev,
- mount_pt, pool_name)
+ mount_pt, pool_name, adap_name)
if pool_settings == None:
raise Exception("Did not get the required pool settings ...")
[PATCH] [TEST] Add new tc to verify block backed VM creation
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B.Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1248352829 25200
# Node ID 341282e8f94f7dd9fc758f0298b198ef2576e7b4
# Parent 28340a978f4e8f829d2d8998c04e0528436e1932
[TEST] Add new tc to verify block backed VM creation.
Tested with current sources on F10 and KVM.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
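Condensed to its essentials, the new test does the following (a sketch distilled from the script below; the helpers come from the script and the cimtest libraries, and error handling plus network-pool setup are omitted):

    def define_and_verify(virt, sysname, block_dev="/dev/sda6"):
        # back the guest disk with a block device rather than a file image
        cxml = get_class(virt)("disk_backed_dom", disk_file_path=block_dev)
        if not cxml.cim_define(sysname):
            return FAIL
        if cxml.cim_start(sysname) != PASS:
            return FAIL
        # confirm the guest's DiskRASD 'Address' property equals the block device
        return verify_guest_address_value(virt, sysname, block_dev)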
diff -r 28340a978f4e -r 341282e8f94f suites/libvirt-cim/misc_cimtests/defineStart_blockbacked_VS.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/misc_cimtests/defineStart_blockbacked_VS.py Thu Jul 23 05:40:29 2009 -0700
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Deepti B. kalakeri <deeptik(a)linux.vnet.ibm.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# This test case verifies that the libvirt-cim provider is able to define and
+# start a block backed VM.
+#
+# Usage:
+# python defineStart_blockbacked_VS.py -i localhost
+# -b /dev/sda6 -v KVM -N root/virt -u root -p <passwd> -l -c
+#
+# Date: 23-07-2009
+#
+
+import sys
+import os
+from optparse import OptionParser
+from commands import getstatusoutput
+sys.path.append('../../../lib')
+from CimTest import Globals
+from CimTest.Globals import logger, log_param
+from CimTest.ReturnCodes import PASS, FAIL
+sys.path.append('../lib')
+from XenKvmLib.vxml import get_class
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.xm_virt_util import active_domain_list
+from XenKvmLib.const import default_network_name
+from XenKvmLib.common_util import pre_check
+from XenKvmLib.common_util import create_netpool_conf, destroy_netpool
+from XenKvmLib.enumclass import EnumInstances
+
+TEST_LOG="cimtest.log"
+test_dom = "disk_backed_dom"
+
+def env_setup(sysname, virt, clean, debug):
+ env_ready = pre_check(sysname, virt)
+ if env_ready != None:
+ print "\n%s. Please check your environment.\n" % env_ready
+ return FAIL
+
+ if clean:
+ cmd = "rm -f %s" % (os.path.join(os.getcwd(), TEST_LOG))
+ status, output = getstatusoutput(cmd)
+
+ if debug:
+ dbg = "-d"
+ else:
+ dbg = ""
+
+ return PASS
+
+def verify_inputs(options, parser):
+ try:
+ if options.ns == None:
+ raise Exception("Please specify the NameSpace")
+
+ if options.username == None:
+ raise Exception("Please specify the Username")
+
+ if options.password == None:
+ raise Exception("Please specify the Password")
+
+ if options.vm_disk_image == None:
+ raise Exception("Please specify the diskimage for the VM")
+
+ except Exception, details:
+ logger.error("Input Verification failed ...")
+ logger.error("\"%s\"\n", details)
+ print parser.print_help()
+ return FAIL
+
+ return PASS
+
+def print_msg(msg1, field1, msg2, field2):
+ logger.info("%s '%s' %s '%s' ", msg1, field1, msg2, field2)
+ print msg1, "'", field1, "'", msg2, "'", field2, "'"
+
+
+def verify_guest_address_value(virt, sysname, vm_disk_image):
+ rasd_list = []
+ classname = get_typed_class(virt, "DiskResourceAllocationSettingData")
+ try:
+ rasd_list = EnumInstances(sysname, classname, ret_cim_inst=True)
+ if len(rasd_list) < 1:
+ raise Exception("%s returned %i instances, excepted at least 1."\
+ % (classname, len(rasd_list)))
+
+ for rasd in rasd_list:
+ # Verify the Address for the domain is set to vm_disk_image
+ if test_dom in rasd['InstanceID']:
+ if rasd['Address'] != "" and rasd['Address'] == vm_disk_image:
+ print_msg("Address field of", test_dom,
+ "is set to ", rasd['Address'])
+ return PASS
+
+ except Exception, detail:
+ logger.error("Exception: %s", detail)
+ return FAIL
+
+ print_msg("Address field of", test_dom, "is not set to", vm_disk_image)
+ return FAIL
+
+def main():
+ usage = "usage: %prog [options] \nex: %prog -i localhost"
+ parser = OptionParser(usage)
+
+ parser.add_option("-i", "--host-url", dest="h_url", default="localhost:5988",
+ help="URL of CIMOM to connect to (host:port)")
+ parser.add_option("-N", "--ns", dest="ns", default="root/virt",
+ help="Namespace (default is root/virt)")
+ parser.add_option("-u", "--user", dest="username", default=None,
+ help="Auth username for CIMOM on source system")
+ parser.add_option("-p", "--pass", dest="password", default=None,
+ help="Auth password for CIMOM on source system")
+ parser.add_option("-v", "--virt-type", dest="virt", default=None,
+ help="Virtualization type [ Xen | KVM ]")
+ parser.add_option("-c", "--clean-log", action="store_true", dest="clean",
+ help="Will remove existing log files before test run")
+ parser.add_option("-l", "--debug-output", action="store_true",
+ dest="debug", help="Duplicate the output to stderr")
+ parser.add_option("-b", "--vm-image", dest="vm_disk_image", default=None,
+ help="Specify the partition on which the vm" \
+ " image is instantiated, Ex: /dev/sda6")
+
+ print "\nPlease check cimtest.log in the curr dir for debug log msgs...\n"
+
+ (options, args) = parser.parse_args()
+
+ virt = options.virt
+
+ if ":" in options.h_url:
+ (sysname, port) = options.h_url.split(":")
+ else:
+ sysname = options.h_url
+
+ log_param(file_name=TEST_LOG)
+
+ # Verify if the CIMOM is running, clean cimtest.log and
+ # Set Debug option if requested
+ status = env_setup(sysname, virt, options.clean, options.debug)
+ if status != PASS:
+ return status
+
+ status = verify_inputs(options, parser)
+ if status != PASS:
+ return status
+
+ os.environ['CIM_NS'] = Globals.CIM_NS = options.ns
+ os.environ['CIM_USER'] = Globals.CIM_USER = options.username
+ os.environ['CIM_PASS'] = Globals.CIM_PASS = options.password
+
+ vm_disk_image = options.vm_disk_image
+ cxml = get_class(virt)(test_dom, disk_file_path=vm_disk_image)
+ status = FAIL
+
+ try:
+ status, netpool = create_netpool_conf(sysname, virt,
+ net_name=default_network_name)
+ if status != PASS:
+ logger.error("\nUnable to create network pool %s",
+ default_network_name)
+ return status
+
+ ret = cxml.cim_define(sysname)
+ if not ret:
+ raise Exception("Unable to define %s" % test_dom)
+
+ status = cxml.cim_start(sysname)
+ if status != PASS:
+ cxml.undefine(sysname)
+ logger.error("Failed to Start the dom: %s", test_dom)
+ raise Exception("Property values for '%s' not properly set" \
+ % test_dom)
+
+ active_doms = active_domain_list(sysname, virt)
+ if test_dom in active_doms:
+ status = verify_guest_address_value(virt, sysname, vm_disk_image)
+ if status == PASS:
+ print_msg("Domain", test_dom, "successfully created on",
+ sysname)
+ else:
+ logger.error("'%s' not found on the '%s'", test_dom, sysname)
+ status = FAIL
+
+ except Exception, detail:
+ logger.error("Exception: %s", detail)
+ destroy_netpool(sysname, virt, default_network_name)
+ return FAIL
+
+ cxml.cim_destroy(sysname)
+ cxml.undefine(sysname)
+ destroy_netpool(sysname, virt, default_network_name)
+
+ return status
+
+if __name__ == "__main__":
+ sys.exit(main())
+
[PATCH 0 of 2] [Test] Tests to verify multiple bridge interfaces and duplicate MAC
by Yogananth Subramanian
Hello everyone,
I would like to submit a set of tests that verify adding multiple bridge-type interfaces and defining a network interface with a duplicate MAC.
The first patch creates a test, 22_addmulti_brg_interface.py, under VirtualSystemManagementService.
The second patch creates a test, 23_verify_duplicate_mac_err.py, under VirtualSystemManagementService.
Thanks and Regards
Yogi
[PATCH] [TEST]#3 Fixed the RASD/07_parent_disk_pool.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B.Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1248261340 25200
# Node ID 28340a978f4e8f829d2d8998c04e0528436e1932
# Parent 69398bff34dec41bd773fd7d8be550109d41913f
[TEST]#3 Fixed the RASD/07_parent_disk_pool.py
Patch 3:
-------
1) Defined pool constants in pool.py
2) Converted the nested for loops into functions
Patch 2:
-------
1) Improved the code
Verified with KVM and current sources on F10 and SLES11.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 69398bff34de -r 28340a978f4e suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py
--- a/suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py Tue Jul 21 01:41:43 2009 -0700
+++ b/suites/libvirt-cim/cimtest/RASD/07_parent_disk_pool.py Wed Jul 22 04:15:40 2009 -0700
@@ -37,7 +37,7 @@
# -PoolID="DiskPool/0"
# -Type=3 [ For Type 1 and 2 as well ]
# -Path="/dev/null"
-# -DevicePath=
+# -DevicePaths=
# -Host="host_sys.domain.com"
# -SourceDirectory="/var/lib/images"
#
@@ -45,13 +45,55 @@
import sys
from sets import Set
+from copy import copy
from CimTest.Globals import logger
from XenKvmLib.const import do_main
from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.pool import get_pool_rasds
+from XenKvmLib.pool import get_pool_rasds, DIR_POOL, FS_POOL, NETFS_POOL, \
+ DISK_POOL, ISCSI_POOL, LOGICAL_POOL, SCSI_POOL
+
sup_types = ['KVM', 'Xen', 'XenFV']
-DISKPOOL_REC_LEN = 3
+DISKPOOL_REC_LEN = 7
+
+def init_list():
+ pval = "/dev/null"
+ dir_pool = { 'ResourceType' : 17,
+ 'PoolID' : "DiskPool/0",
+ 'Type' : DIR_POOL,
+ 'DevicePaths': None,
+ 'Host' : None, 'SourceDirectory': None,
+ 'Path' : pval
+ }
+
+ fs_pool = dir_pool.copy()
+ fs_pool['Type'] = FS_POOL
+ fs_pool['DevicePaths'] = [u'/dev/sda100']
+
+ netfs_pool = dir_pool.copy()
+ netfs_pool['Type'] = NETFS_POOL
+ netfs_pool['Host'] = u'host_sys.domain.com'
+ netfs_pool['SourceDirectory'] = u'/var/lib/images'
+
+ disk_pool = dir_pool.copy()
+ disk_pool['Type'] = DISK_POOL
+ disk_pool['DevicePaths'] = [u'/dev/VolGroup00/LogVol100']
+
+ iscsi_pool = dir_pool.copy()
+ iscsi_pool['Type'] = ISCSI_POOL
+ iscsi_pool['DevicePaths'] = [u'iscsi-target']
+ iscsi_pool['Host'] = u'host_sys.domain.com'
+
+ logical_pool = dir_pool.copy()
+ logical_pool['Type'] = LOGICAL_POOL
+
+ scsi_pool = dir_pool.copy()
+ scsi_pool['Type'] = SCSI_POOL
+ scsi_pool['Path'] = '/dev/disk/by-id'
+
+ exp_t_dp_h_sdir_path = [ dir_pool, fs_pool, netfs_pool, disk_pool,
+ iscsi_pool, logical_pool, scsi_pool ]
+ return exp_t_dp_h_sdir_path
def get_rec(diskpool_rasd, inst_id='Default'):
recs = []
@@ -60,6 +102,44 @@
recs.append(dp_rasd)
return recs
+def cmp_recs(item, rec):
+ try:
+ for key, val in item.iteritems():
+ exp_val = val
+ res_val = rec[key]
+ if type(val).__name__ == 'list':
+ cmp_exp = (len(Set(res_val) - Set(exp_val)) != 0)
+ elif type(val).__name__ != 'NoneType':
+ cmp_exp = (exp_val != res_val)
+ elif type(val).__name__ == 'NoneType':
+ continue
+
+ if cmp_exp:
+ raise Exception("Mismatching values, Got %s, "\
+ "Expected %s" % (res_val, exp_val))
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ return FAIL
+
+ return PASS
+
+def verify_records(exp_t_dp_h_sdir_path, rec):
+ try:
+ found = False
+ for item in exp_t_dp_h_sdir_path:
+ if rec['Type'] == item['Type']:
+ status = cmp_recs(item, rec)
+ if status != PASS:
+ raise Exception("Verification failed for '%s'" \
+ % rec['Type'])
+ found = True
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ return FAIL, found
+
+ return PASS, found
+
+
@do_main(sup_types)
def main():
options = main.options
@@ -70,16 +150,9 @@
if status != PASS:
return status
inst_list = [ 'Default', 'Minimum', 'Maximum', 'Increment' ]
- n_rec_val = { 'ResourceType' : 17,
- 'PoolID' : "DiskPool/0",
- 'Path' : "/dev/null",
- }
- exp_type_path_host_dir = [('1', 'None', 'None', 'None'),
- ('2', '/dev/sda100', 'None', 'None'),
- ('3', 'None', 'host_sys.domain.com',
- '/var/lib/images')]
-
-
+
+ exp_t_dp_h_sdir_path = init_list()
+
for inst_type in inst_list:
logger.info("Verifying '%s' records", inst_type)
@@ -89,23 +162,10 @@
raise Exception("Got %s recs instead of %s" %(len(n_rec),
DISKPOOL_REC_LEN))
- res_type_path_host_dir = []
for rec in n_rec:
- l = (str(rec['Type']), str(rec['DevicePath']),
- str(rec['Host']), str(rec['SourceDirectory']))
- res_type_path_host_dir.append(l)
-
- if len(Set(exp_type_path_host_dir) & Set(res_type_path_host_dir)) \
- != DISKPOOL_REC_LEN :
- raise Exception("Mismatching values, \nGot %s,\nExpected %s"\
- %(exp_type_path_host_dir,
- res_type_path_host_dir))
-
- for key in n_rec_val.keys():
- for rec in n_rec:
- if n_rec_val[key] != rec[key]:
- raise Exception("'%s' Mismatch, Got %s, Expected %s" \
- % (key, rec[key], n_rec_val[key]))
+ status, found = verify_records(exp_t_dp_h_sdir_path, rec)
+ if status != PASS or found == False:
+ return FAIL
except Exception, details:
logger.error("Exception details: %s", details)
diff -r 69398bff34de -r 28340a978f4e suites/libvirt-cim/lib/XenKvmLib/pool.py
--- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue Jul 21 01:41:43 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed Jul 22 04:15:40 2009 -0700
@@ -40,6 +40,14 @@
input_graphics_pool_rev = 757
libvirt_cim_child_pool_rev = 837
+DIR_POOL = 1L
+FS_POOL = 2L
+NETFS_POOL = 3L
+DISK_POOL = 4L
+ISCSI_POOL = 5L
+LOGICAL_POOL = 6L
+SCSI_POOL = 7L
+
def pool_cn_to_rasd_cn(pool_cn, virt):
if pool_cn.find('ProcessorPool') >= 0:
return get_typed_class(virt, "ProcResourceAllocationSettingData")