Deepti B. Kalakeri wrote:
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1246055400 25200
# Node ID 1e14cb463dc2251286e8e82099ac410c4e8d3453
# Parent d67a606da9f7d631368d04280865eb9a21e7ea8a
[TEST] Adding new tc to verify fs storage pool creation.
This tc will not be run in the batch mode of cimtest and hence needs to
be run individually using the command below:
python create_verify_storagepool.py -t 2 -d /dev/sda4 -m /tmp/mnt -n diskfs -v Xen -u <username> -p <passwd>
Tested with Xen on RHEL with current sources for fs type pool.
Will update the patch to include logical pool verification as well.
I get the following error:
# python create_verify_storagepool.py -t 2 -d /dev/sda1 -m /tmp/mnt -n diskfs -v KVM -u user -p pass
Traceback (most recent call last):
  File "create_verify_storagepool.py", line 41, in <module>
    import TestSuite
ImportError: No module named TestSuite
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r d67a606da9f7 -r 1e14cb463dc2
suites/libvirt-cim/misc_cimtests/create_verify_storagepool.py
+# python create_verify_storagepool.py -t 2 -d /dev/sda4 -m /tmp/mnt -n diskfs
+#        -v Xen -u <username> -p <passwd>
Instead of passing the type number (which can be difficult to remember),
can you have it take a string value instead? Something like:
-t fs or -t logical
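For example, the option handling could look something like this (just a
sketch; pool_str_to_type is a name I made up):

pool_str_to_type = { 'fs' : 2, 'logical' : 4 }

parser.add_option("-t", "--pool-type", dest="pool_type", default=None,
                  help="Pool type: [ fs | logical ]")

(options, args) = parser.parse_args()
if options.pool_type not in pool_str_to_type:
    raise Exception("Must specify pool type: [ fs | logical ]")
# Convert the string into the DiskPool type value the provider expects
pool_type = cim_types.Uint16(pool_str_to_type[options.pool_type])

That way the user doesn't have to remember that fs == 2 and logical == 4.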
+#
+# Where t can be :
+# 2 - FileSystem
+# 4 - Logical etc
+#
+#
+# Date : 27.06.2009
+
+import os
+import sys
I had to add the following:
sys.path.append('../../../lib')
This fixed the error I pasted above.. After fixing this, I got the
following error:
Traceback (most recent call last):
  File "create_verify_storagepool.py", line 45, in <module>
    from XenKvmLib.classes import inst_to_mof, get_typed_class
ImportError: No module named XenKvmLib.classes
This can be fixed by using the following:
sys.path.append('../lib')
This needs to be before any imports from the XenKvmLib directory, so
you'll need to reorganize your import statements a bit..
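So the top of the script would end up looking something like this (both
appends before any of the cimtest imports):

import os
import sys

sys.path.append('../../../lib')    # for TestSuite
sys.path.append('../lib')          # for XenKvmLib

import TestSuite
from XenKvmLib.classes import inst_to_mof, get_typed_class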
+import TestSuite
+from optparse import OptionParser
+from CimTest import Globals
+from XenKvmLib.classes import inst_to_mof, get_typed_class
+from CimTest.Globals import logger, log_param
+from commands import getstatusoutput
+from pywbem import WBEMConnection, cim_types
+from distutils.text_file import TextFile
+from XenKvmLib.pool import get_pool_rasds
+
+PASS = 0
+FAIL = 1
+supp_types = [ 'Xen', 'KVM' ]
+pool_types = { 2 : 'DISK_POOL_FS', 4 : 'DISK_POOL_LOGICAL' }
+
+def verify_inputs(part_dev, mount_pt, pool_type):
+    if pool_type not in pool_types.keys():
+        logger.error("Pool type '%s' specified is not valid", pool_type)
+        return FAIL
+
+    # TextFile skips all the lines which have comments and which are blank
+    fd = TextFile(filename="/etc/fstab")
+    fstab_info = [x.rstrip('\n') for x in fd.readlines()]
+    fd.close()
+
+    for line in fstab_info:
+        try:
+            # Check if the specified partition is mounted before using it
+            if part_dev in line.split()[0]:
+                logger.error("[%s] already mounted", part_dev)
+                raise Exception("Please specify free partition other than " \
+                                "[%s]" % part_dev)
+
+            # Check if mount point is already used for mounting
+            if mount_pt in line.split()[1]:
+                logger.error("[%s] already mounted", mount_pt)
+                raise Exception("Please specify dir other than [%s]" %mount_pt)
What if something is already mounted but it's not listed in /etc/fstab?
If you want to be sure something isn't mounted, you can check the
output of the "mount" command.
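Something like this (untested sketch) would catch mounts that aren't in
/etc/fstab:

    # Check the live mount table as well, not just /etc/fstab
    status, out = getstatusoutput("mount")
    for line in out.splitlines():
        fields = line.split()
        # "mount" prints: <dev> on <mount point> type <fs> (<options>)
        if fields[0] == part_dev or fields[2] == mount_pt:
            logger.error("[%s] or [%s] is already in use", part_dev, mount_pt)
            return FAIL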
+
+        except Exception, details:
+            logger.error("Exception details is %s", details)
+            return FAIL
+
+    ## Check if the mount point specified already exist if not then create it..
+    if not os.path.exists(mount_pt):
+        os.mkdir(mount_pt)
+    else:
+        ## Check if the mount point specified is a dir
+        if not os.path.isdir(mount_pt):
+            logger.error("The mount point [%s] should be a dir", mount_pt)
+            return FAIL
+
+        files = os.listdir(mount_pt)
+        if len(files) != 0:
+            logger.info("The mount point [%s] given is not empty", mount_pt)
+
+    return PASS
+
+def get_uri(virt):
+    if virt == 'Xen':
+        vuri = 'xen:///'
+    elif vir_type == 'Kvm':
Instead of vir_type, this should be virt. Although, can you check for
'KVM' instead of 'Kvm'? That way it'll be similar to the options that
are passed when doing a cimtest bulk run.
+        vuri = 'qemu:///system'
+    return vuri
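That is:

def get_uri(virt):
    if virt == 'Xen':
        vuri = 'xen:///'
    elif virt == 'KVM':
        vuri = 'qemu:///system'
    return vuri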
+
+def verify_pool(virt, pool_name):
+    vuri = get_uri(virt)
+    cmd = "virsh -c %s pool-list --all | grep %s" %(vuri, pool_name)
Need a space between % and (vuri..
+    return getstatusoutput(cmd)
+
+def main():
+    usage = "usage: %prog [options] \nex: %prog -i localhost"
+    parser = OptionParser(usage)
+
+    parser.add_option("-i", "--host-url", dest="h_url", default="localhost:5988",
+                      help="URL of CIMOM to connect to (host:port)")
+    parser.add_option("-N", "--ns", dest="ns", default="root/virt",
+                      help="Namespace (default is root/virt)")
+    parser.add_option("-u", "--user", dest="username", default=None,
+                      help="Auth username for CIMOM on source system")
+    parser.add_option("-p", "--pass", dest="password", default=None,
+                      help="Auth password for CIMOM on source system")
+    parser.add_option("-v", "--virt-type", dest="virt", default=None,
+                      help="Virtualization type [ Xen | KVM ]")
Can you add support for LXC here? The disk and network pool support in
libvirt is host wide. If you create a pool using the qemu uri, you
should also see it using the lxc uri.
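If you do, the change would be small; something like this (I believe the
lxc uri is 'lxc:///'):

supp_types = [ 'Xen', 'KVM', 'LXC' ]

# and in get_uri():
    elif virt == 'LXC':
        vuri = 'lxc:///'

The -v help text would then become "Virtualization type [ Xen | KVM | LXC ]".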
+ parser.add_option("-t", "--pool-type",
dest="pool_type", default=None,
+ help="Pool type:[ fs | logical ]")
+ parser.add_option("-d", "--part-dev", dest="part_dev",
default=None,
+ help="specify the free partition to be used")
+ parser.add_option("-m", "--mnt_pt", dest="mnt_pt",
default=None,
+ help="Mount point to be used")
+ parser.add_option("-n", "--pool-name",
dest="pool_name", default=None,
+ help="Pool to be created")
+
+ (options, args) = parser.parse_args()
+
+    try:
+        if options.part_dev == None:
+            raise Exception("Free Partition to be mounted not specified")
+
+        if options.mnt_pt == None:
+            raise Exception("Mount points to be used not specified")
+
+        if options.pool_name == None:
+            raise Exception("Must specify the Pool Name to be created")
+
+        if options.virt == None or options.virt not in supp_types:
+            raise Exception("Must specify virtualization type")
+
+        if options.pool_type == None:
+            raise Exception("Must specify pool type to be tested")
+
+    except Exception, details:
+        print "FATAL: ", details
+        print parser.print_help()
+        return FAIL
+
+    part_dev = options.part_dev
+    mount_pt = options.mnt_pt
+    pool_name = options.pool_name
+    pool_type = cim_types.Uint16(options.pool_type)
+    virt = options.virt
+
+    testsuite = TestSuite.TestSuite(log=True)
+    log_param(file_name="cimtest.log")
+    print "Please check cimtest.log in the curr dir for debug log msgs..."
+    status = verify_inputs(part_dev, mount_pt, pool_type)
+    if status != PASS:
+        logger.error("Input verification failed")
+        return status
+
+    status, out = verify_pool(virt, pool_name)
+    if status == PASS:
+        logger.error("Pool --> '%s' already exist", pool_name)
+        logger.error("Specify some other pool name")
+        return status
+
+    if ":" in options.h_url:
+        (sysname, port) = options.h_url.split(":")
+    else:
+        sysname = options.h_url
+
+    src_conn = WBEMConnection('http://%s' % sysname,
+                              (options.username, options.password), options.ns)
+
+    os.environ['CIM_NS'] = Globals.CIM_NS = options.ns
+    os.environ['CIM_USER'] = Globals.CIM_USER = options.username
+    os.environ['CIM_PASS'] = Globals.CIM_PASS = options.password
+
+    try:
+        status,dp_rasds = get_pool_rasds(sysname, virt, "DiskPool")
Need a space between status, and dp_rasds..
+        for i in range(0, len(dp_rasds)):
Instead of using "for i in range(0, len(dp_rasds))", why not use:
for rasd in dp_rasds:
+ pool_id = "%s/%s" %("DiskPool",
pool_name)
+ dpool_rasd=dp_rasds[i]
If you use "for rasd in dp_rasds:", then you don't need this line..
+            dpool_rasd['PoolID'] = pool_id
Not sure why you have this line here? Why do you set the PoolID of each
instance as you go through the loop? You should only set the attributes
of the RASD that matches the type you're looking for below..
Also, the provider ignores PoolID anyway - so you shouldn't need to set it.
+            if dpool_rasd['Type'] == pool_type and \
+               dpool_rasd['InstanceID'] == 'Default':
I'd put this call in a function.. loop through the template RASDs until
you find the RASD you need. Then return that from the function.
That'll reduce some of the code in main() and it'll reduce some of the
nesting here.
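Something like this (rough sketch; the function name is just a
suggestion):

def get_default_dpool_rasd(dp_rasds, pool_type):
    # Return the 'Default' template RASD that matches the requested pool type
    for rasd in dp_rasds:
        if rasd['Type'] == pool_type and rasd['InstanceID'] == 'Default':
            return rasd
    return None

Then main() only needs to set DevicePaths, Path, and InstanceID on the
RASD it gets back.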
+                dpool_rasd['DevicePaths'] =[part_dev]
+                dpool_rasd['Path'] = mount_pt
+                dpool_rasd['InstanceID'] = pool_id
+                break
+        pool_settings = inst_to_mof(dpool_rasd)
+        rpcs_cn = get_typed_class(virt, "ResourcePoolConfigurationService")
+        res = src_conn.InvokeMethod("CreateChildResourcePool",
+                                    rpcs_cn,
+                                    Settings=[pool_settings],
+                                    ElementName=pool_name)
+    except Exception, details:
+        logger.error("Exception details: %s", details)
If you encounter an error here, you'll want to clean up and return an
error. No need to verify the pool if InvokeMethod() fails.
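Something like:

    except Exception, details:
        logger.error("Exception details: %s", details)
        return FAIL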
+
+    status, out = verify_pool(virt, pool_name)
+    if status != PASS:
+        logger.error("Failed to create pool: %s ", pool_name)
+        return status
+
+    vuri = get_uri(virt)
+    virsh = "virsh -c %s" % vuri
+    cmd = "%s pool-destroy %s && %s pool-undefine %s" \
+          %(virsh, pool_name, virsh, pool_name)
Need a space between %(virsh...
+    ret, out = getstatusoutput(cmd)
+    if ret != PASS:
+        logger.info("WARNING: pool %s was not cleaned on %s",
+                    pool_name, sysname)
+        logger.info("WARNING: Please remove it manually")
I agree - you don't need to return an error here if the cleanup fails.
But I would change these to logger.error() statements or include print
statements as well. logger.info() statements don't get printed to
stdout, so the user won't know they need to clean up their environment.
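That is, something like:

        logger.error("WARNING: pool %s was not cleaned on %s",
                     pool_name, sysname)
        logger.error("WARNING: Please remove it manually")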
Thanks Deepti!
--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin(a)linux.vnet.ibm.com