# HG changeset patch
# User Deepti B. Kalakeri<deeptik(a)linux.vnet.ibm.com>
# Date 1246281830 25200
# Node ID 3d4e90851a09756b52a452047cfcba8191a1e794
# Parent d67a606da9f7d631368d04280865eb9a21e7ea8a
[TEST] Adding new tc to verify fs storage pool creation.
Update in patch 2:
------------------
1) rearranged import stmts
2) add check to see if cimserver is started
3) Added options to clean the old log
4) Added options to get the debug msg on the stdout
5) Added lxc support
6) Moved the looping for setting the poolsettings in a function
7) Rectified the virt_type to virt and also to use KVM for checking while setting vuri
Patch 1:
--------
This tc will not be run in the batch mode of cimtest and hence needs to
be run individually using the command below.
python create_verify_storagepool.py -t 2 -d /dev/sda4 -m /tmp/mnt -n diskfs
-v Xen -u <username> -p <passwd>
Tested with Xen on RHEL with current sources for fs type pool.
Will Update the patch to include logical pool verification as well.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r d67a606da9f7 -r 3d4e90851a09
suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py Mon Jun 29 06:23:50
2009 -0700
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Deepti B. Kalakeri<dkalaker(a)in.ibm.com>
+#
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# This test case should test the CreateChildResourcePool service
+# supplied by the RPCS provider.
+# This tc verifies the FileSystem Type storage pool.
+#
+# The test case is not run in the batch run and we need to run it using
+# the following command:
+# python create_verify_storagepool.py -t 2 -d /dev/sda4 -m /tmp/mnt -n diskfs
+# -v Xen -u <username> -p <passwd>
+#
+# Where t can be :
+# 2 - FileSystem
+# 4 - Logical etc
+#
+#
+# Date : 27.06.2009
+
+import os
+import sys
+from optparse import OptionParser
+from commands import getstatusoutput
+from distutils.text_file import TextFile
+from pywbem import WBEMConnection, cim_types
+sys.path.append('../../../lib')
+from CimTest import Globals
+from CimTest.Globals import logger, log_param
+sys.path.append('../lib')
+from XenKvmLib.classes import inst_to_mof, get_typed_class
+from XenKvmLib.pool import get_pool_rasds
+sys.path.append('../')
+from main import pre_check
+
+PASS = 0
+FAIL = 1
+TEST_LOG="cimtest.log"
+
+supp_types = [ 'Xen', 'KVM' , 'LXC' ]
+pool_types = { 'DISK_POOL_FS' : 2 }
+
+def verify_cmd_options(options):
+ try:
+ if options.part_dev == None:
+ raise Exception("Free Partition to be mounted not specified")
+
+ if options.mnt_pt == None:
+ raise Exception("Mount points to be used not specified")
+
+ if options.pool_name == None:
+ raise Exception("Must specify the Pool Name to be created")
+
+ if options.virt == None or options.virt not in supp_types:
+ raise Exception("Must specify virtualization type")
+
+ if options.pool_type == None:
+ raise Exception("Must specify pool type to be tested")
+
+ except Exception, details:
+ print "FATAL: ", details
+ print parser.print_help()
+ return FAIL
+
+ return PASS
+
+def env_setup(sysname, virt, clean, debug):
+ env_ready = pre_check(sysname, virt)
+ if env_ready != None:
+ print "\n%s. Please check your environment.\n" % env_ready
+ return FAIL
+
+ if clean:
+ cmd = "rm -f %s" % (os.path.join(os.getcwd(), TEST_LOG))
+ status, output = getstatusoutput(cmd)
+
+ if debug:
+ dbg = "-d"
+ else:
+ dbg = ""
+
+ return PASS
+
+def get_pooltype(pooltype, virt):
+ if pooltype == "fs":
+ pool_type = pool_types['DISK_POOL_FS']
+ else:
+ logger.error("Invalid pool type ....")
+ return None, None
+ return PASS, pool_type
+
+def verify_inputs(part_dev, mount_pt):
+ del_dir = False
+ cmd = "mount"
+ status, mount_info = getstatusoutput(cmd)
+ if status != PASS:
+ logger.error("Failed to get mount info.. ")
+ return FAIL, del_dir
+
+ for line in mount_info.split('\n'):
+ try:
+ # Check if the specified partition is mounted before using it
+ part_name = line.split()[0]
+ if part_dev == part_name:
+ logger.error("[%s] already mounted", part_dev)
+ raise Exception("Please specify free partition other than " \
+ "[%s]" % part_dev)
+
+ # Check if mount point is already used for mounting
+ mount_name = line.split()[2]
+ if mount_pt == mount_name:
+ logger.error("[%s] already mounted", mount_pt)
+ raise Exception("Please specify dir other than [%s]"
%mount_pt)
+
+ except Exception, details:
+ logger.error("%s", details)
+ return FAIL, del_dir
+
+ # Check if the mount point specified already exist, if not then create it..
+ if not os.path.exists(mount_pt):
+ os.mkdir(mount_pt)
+
+ # set del_dir to True so that we remove it before exiting from the tc.
+ del_dir = True
+ else:
+ # Check if the mount point specified is a dir
+ if not os.path.isdir(mount_pt):
+ logger.error("The mount point [%s] should be a dir", mount_pt)
+ return FAIL, del_dir
+
+ files = os.listdir(mount_pt)
+ if len(files) != 0:
+ logger.info("The mount point [%s] given is not empty", mount_pt)
+
+ return PASS, del_dir
+
+def get_uri(virt):
+ if virt == 'Xen':
+ vuri = 'xen:///'
+ elif virt == 'KVM':
+ vuri = 'qemu:///system'
+ elif virt == 'LXC':
+ vuri = 'lxc:///system'
+ return vuri
+
+def get_pool_settings(dp_rasds, pooltype, part_dev, mount_pt, pool_name):
+ pool_settings = None
+ for dpool_rasd in dp_rasds:
+ if dpool_rasd['Type'] == pooltype and \
+ dpool_rasd['InstanceID'] == 'Default':
+ dpool_rasd['DevicePaths'] =[part_dev]
+ dpool_rasd['Path'] = mount_pt
+ dp_pid = "%s/%s" %("DiskPool", pool_name)
+ dpool_rasd['PoolID'] = dpool_rasd['InstanceID'] = dp_pid
+ break
+ if not pool_name in dpool_rasd['InstanceID']:
+ return pool_settings
+
+ pool_settings = inst_to_mof(dpool_rasd)
+ return pool_settings
+
+
+def verify_pool(virt, pool_name):
+ vuri = get_uri(virt)
+ cmd = "virsh -c %s pool-list --all | grep %s" % (vuri, pool_name)
+ return getstatusoutput(cmd)
+
+def cleanup(virt, pool_name, sysname, mount_pt, del_dir):
+ virsh = "virsh -c %s" % get_uri(virt)
+ cmd = "%s pool-destroy %s && %s pool-undefine %s" \
+ % (virsh, pool_name, virsh, pool_name)
+ ret, out = getstatusoutput(cmd)
+ if ret != PASS:
+ logger.error("WARNING: pool '%s' was not cleaned on
'%s'",
+ pool_name, sysname)
+ logger.error("WARNING: Please remove it manually")
+
+ if del_dir == True:
+ cmd ="rm -rf %s" % mount_pt
+ status, out = getstatusoutput(cmd)
+ if status != PASS:
+ logger.error("WARNING: '%s' was not removed", mount_pt)
+ logger.error("WARNING: Please remove %s manually", mount_pt)
+
+def main():
+ usage = "usage: %prog [options] \nex: %prog -i localhost"
+ parser = OptionParser(usage)
+
+ parser.add_option("-i", "--host-url", dest="h_url",
default="localhost:5988",
+ help="URL of CIMOM to connect to (host:port)")
+ parser.add_option("-N", "--ns", dest="ns",
default="root/virt",
+ help="Namespace (default is root/virt)")
+ parser.add_option("-u", "--user", dest="username",
default=None,
+ help="Auth username for CIMOM on source system")
+ parser.add_option("-p", "--pass", dest="password",
default=None,
+ help="Auth password for CIMOM on source system")
+ parser.add_option("-v", "--virt-type", dest="virt",
default=None,
+ help="Virtualization type [ Xen | KVM ]")
+ parser.add_option("-t", "--pool-type",
dest="pool_type", default=None,
+ help="Pool type:[ fs | logical ]")
+ parser.add_option("-d", "--part-dev", dest="part_dev",
default=None,
+ help="specify the free partition to be used")
+ parser.add_option("-m", "--mnt_pt", dest="mnt_pt",
default=None,
+ help="Mount point to be used")
+ parser.add_option("-n", "--pool-name",
dest="pool_name", default=None,
+ help="Pool to be created")
+ parser.add_option("-c", "--clean-log",
+ action="store_true", dest="clean",
+ help="Will remove existing log files before test run")
+ parser.add_option("-l", "--debug-output",
action="store_true", dest="debug",
+ help="Duplicate the output to stderr")
+
+ (options, args) = parser.parse_args()
+
+ # Verify command line options
+ status = verify_cmd_options(options)
+ if status != PASS:
+ return status
+
+ part_dev = options.part_dev
+ mount_pt = options.mnt_pt
+ pool_name = options.pool_name
+ virt = options.virt
+
+ if ":" in options.h_url:
+ (sysname, port) = options.h_url.split(":")
+ else:
+ sysname = options.h_url
+
+ # Verify if the CIMOM is running, clean cimtest.log if requested
+ # Set Debug option if requested
+ status = env_setup(sysname, virt, options.clean, options.debug)
+ if status != PASS:
+ return status
+
+ log_param(file_name=TEST_LOG)
+
+ print "Please check cimtest.log in the curr dir for debug log msgs..."
+
+ status, pooltype = get_pooltype(options.pool_type, virt)
+ if status != PASS:
+ return FAIL
+
+ pooltype = cim_types.Uint16(pooltype)
+
+ status, del_dir = verify_inputs(part_dev, mount_pt)
+ if status != PASS:
+ if del_dir == True:
+ cmd ="rm -rf %s" % mount_pt
+ status, out = getstatusoutput(cmd)
+ logger.error("Input verification failed")
+ return status
+
+
+ status, out = verify_pool(virt, pool_name)
+ if status == PASS:
+ logger.error("Pool --> '%s' already exist", pool_name)
+ logger.error("Specify some other pool name")
+ return status
+
+ try:
+ src_conn = WBEMConnection('http://%s' % sysname, (options.username,
+ options.password), options.ns)
+
+ os.environ['CIM_NS'] = Globals.CIM_NS = options.ns
+ os.environ['CIM_USER'] = Globals.CIM_USER = options.username
+ os.environ['CIM_PASS'] = Globals.CIM_PASS = options.password
+
+ # Get DiskPoolRASD's from SDC association with AC of DiskPool/0
+ status, dp_rasds = get_pool_rasds(sysname, virt, "DiskPool")
+ if status != PASS:
+ raise Exception("Failed to get DiskPool Rasd's")
+
+ # Get the DiskPoolRASD mof with appropriate values of diskpool
+ # to be created....
+ pool_settings = get_pool_settings(dp_rasds, pooltype, part_dev,
+ mount_pt, pool_name)
+ if pool_settings == None:
+ raise Exception("Did not get the required pool settings ...")
+
+ rpcs_cn = get_typed_class(virt, "ResourcePoolConfigurationService")
+ # Create DiskPool..
+ res = src_conn.InvokeMethod("CreateChildResourcePool",
+ rpcs_cn,
+ Settings=[pool_settings],
+ ElementName=pool_name)
+
+ # Verify if the desired pool was successfully created ..
+ status, out = verify_pool(virt, pool_name)
+ if status != PASS:
+ raise Exception("Failed to create pool: %s " % pool_name)
+
+ except Exception, details:
+ logger.error("In main(), exception '%s'", details)
+ if del_dir == True:
+ cmd ="rm -rf %s" % mount_pt
+ status, out = getstatusoutput(cmd)
+ return FAIL
+
+ # Clean up the pool and the mount dir that was created ...
+ cleanup(virt, pool_name, sysname, mount_pt, del_dir)
+
+ if res[0] == PASS:
+ logger.info("Pool %s was successfully verified for pool type %s",
+ pool_name , options.pool_type)
+
+ # Place holder to give a hint to the user the tc passed
+ # otherwise the user will have to look into the cimtest.log in the
+ # current dir.
+ print "Pool '", pool_name,"' was successfully verified for
pool type "\
+ "'", options.pool_type , "'"
+ else:
+ logger.error("Test Failed to verify '%s' pool creation ....",
+ options.pool_type)
+ return res[0]
+if __name__=="__main__":
+ sys.exit(main())
+