[PATCH] [CU] url should be options.url in indication_tester.py
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1210633243 25200
# Node ID ea4548b7afe19e8d186246f4cce405565a1eb8c2
# Parent 79873958006140bbcd14122f9c7e1e80c00d9eef
[CU] url should be options.url in indication_tester.py
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 798739580061 -r ea4548b7afe1 tools/indication_tester.py
--- a/tools/indication_tester.py Fri May 02 11:32:27 2008 -0600
+++ b/tools/indication_tester.py Mon May 12 16:00:43 2008 -0700
@@ -413,7 +413,7 @@
if ":" in options.url:
(sysname, port) = options.url.split(":")
else:
- sysname = url
+ sysname = options.url
if options.dump:
dump_xml(options.name, args[0], options.ns, sysname)
16 years, 6 months
[PATCH] [TEST] #3 Updating 01_forward.py of SDC
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1210587792 25200
# Node ID cdcf642c493a548d2deb499f223bb9a1f414cb3c
# Parent 336c6f965baed9b56b484507171ad940e09e6096
[TEST] #3 Updating 01_forward.py of SDC.
1) To make use of the lib fn conf_file(), cleanup_restore() and create_diskpool_file().
2) To use the lib fn print_field_error.
3) To retain the PropertyPolicy, ValueRole, ValueRange depending on the revision no.
4) Tested on rpm based libvirt-cim KVM machine, latest libvirt-cim for KVM and Xen, XenFV.
5) This tc will fail on latest libvirt-cim for KVM, need storage pool to make test case work.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 336c6f965bae -r cdcf642c493a suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
--- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Wed May 07 09:54:57 2008 -0700
+++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Mon May 12 03:23:12 2008 -0700
@@ -55,205 +55,189 @@ import sys
import sys
import os
from distutils.file_util import move_file
-from VirtLib import utils
from XenKvmLib import assoc
from XenKvmLib import enumclass
-from CimTest import Globals
-from CimTest.Globals import do_main
from XenKvmLib.test_xml import netxml
from XenKvmLib.test_doms import create_vnet
from VirtLib.live import net_list
from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from CimTest.Globals import do_main, platform_sup, logger, \
+CIM_ERROR_GETINSTANCE, CIM_ERROR_ASSOCIATORS
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.common_util import cleanup_restore, test_dpath, \
+create_diskpool_file
+from XenKvmLib.common_util import print_field_error
+from XenKvmLib.const import CIM_REV
-sup_types = ['Xen']
-
-status = PASS
-test_dpath = "foo"
-disk_file = '/tmp/diskpool.conf'
-back_disk_file = disk_file + "." + "SSDC_01_forward"
diskid = "%s/%s" % ("DiskPool", test_dpath)
memid = "%s/%s" % ("MemoryPool", 0)
procid = "%s/%s" % ("ProcessorPool", 0)
+libvirtcim_sdc_rasd_rev = 571
-def conf_file():
+
+def get_or_bail(virt, ip, id, pool_class):
"""
- Creating diskpool.conf file.
- """
- try:
- f = open(disk_file, 'w')
- f.write('%s %s' % (test_dpath, '/'))
- f.close()
- except Exception,detail:
- Globals.logger.error("Exception: %s", detail)
- status = SKIP
- sys.exit(status)
-
-def clean_up_restore():
- """
- Restoring back the original diskpool.conf
- file.
- """
- try:
- if os.path.exists(back_disk_file):
- os.remove(disk_file)
- move_file(back_disk_file, disk_file)
- except Exception, detail:
- Globals.logger.error("Exception: %s", detail)
- status = SKIP
- sys.exit(status)
-
-
-def get_or_bail(ip, id, pool_class):
- """
- Getinstance for the CLass and return instance on success, otherwise
+ Getinstance for the Class and return instance on success, otherwise
exit after cleanup_restore .
"""
key_list = { 'InstanceID' : id }
-
try:
- instance = enumclass.getInstance(ip, pool_class, key_list)
+ instance = enumclass.getInstance(ip, pool_class, key_list, virt)
except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, '%s' % pool_class)
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore()
- status = FAIL
- sys.exit(status)
+ logger.error(CIM_ERROR_GETINSTANCE, '%s' % pool_class)
+ logger.error("Exception: %s", detail)
+ cleanup_restore()
+ sys.exit(FAIL)
return instance
-def init_list(disk, mem, net, proc):
+def init_list(virt, dpool, npool, mpool, ppool):
"""
Creating the lists that will be used for comparisons.
"""
instlist = [
- disk.InstanceID, \
- mem.InstanceID, \
- net.InstanceID, \
- proc.InstanceID
- ]
+ dpool.InstanceID,
+ mpool.InstanceID,
+ npool.InstanceID,
+ ppool.InstanceID
+ ]
cllist = [
- "Xen_DiskResourceAllocationSettingData", \
- "Xen_MemResourceAllocationSettingData", \
- "Xen_NetResourceAllocationSettingData", \
- "Xen_ProcResourceAllocationSettingData"
+ get_typed_class(virt, "DiskResourceAllocationSettingData"),
+ get_typed_class(virt, "MemResourceAllocationSettingData"),
+ get_typed_class(virt, "NetResourceAllocationSettingData"),
+ get_typed_class(virt, "ProcResourceAllocationSettingData")
]
rtype = {
- "Xen_DiskResourceAllocationSettingData" : 17, \
- "Xen_MemResourceAllocationSettingData" : 4, \
- "Xen_NetResourceAllocationSettingData" : 10, \
- "Xen_ProcResourceAllocationSettingData" : 3
+ get_typed_class(virt, "DiskResourceAllocationSettingData") : 17,
+ get_typed_class(virt, "MemResourceAllocationSettingData") : 4,
+ get_typed_class(virt, "NetResourceAllocationSettingData") : 10,
+ get_typed_class(virt, "ProcResourceAllocationSettingData") : 3
}
rangelist = {
- "Default" : 0, \
- "Minimum" : 1, \
- "Maximum" : 2, \
+ "Default" : 0,
+ "Minimum" : 1,
+ "Maximum" : 2,
"Increment" : 3
}
return instlist, cllist, rtype, rangelist
+def get_pool_info(virt, server, devid, poolname=""):
+ pool_cname = get_typed_class(virt, poolname)
+ pool_cn = eval("enumclass." + pool_cname)
+ return get_or_bail(virt, server, id=devid, pool_class=pool_cn)
-def print_error(index, fieldname, assoc_info, exp_value):
- ret_value = assoc_info[index][fieldname]
- Globals.logger.error("%s Mismatch", fieldname)
- Globals.logger.error("Returned %s instead of %s", ret_value, exp_value)
+def get_pool_details(virt, server):
+ dpool = npool = mpool = ppool = None
+ try :
+ dpool = get_pool_info(virt, server, diskid, poolname="DiskPool")
+ mpool = get_pool_info(virt, server, memid, poolname= "MemoryPool")
+ ppool = get_pool_info(virt, server, procid, poolname= "ProcessorPool")
-
-@do_main(sup_types)
-def main():
- options = main.options
- global status
-
- cn = 'Xen_AllocationCapabilities'
- loop = 0
- server = options.ip
-
- # Taking care of already existing diskconf file
- # Creating diskpool.conf if it does not exist
- # Otherwise backing up the prev file and create new one.
- os.system("rm -f %s" % back_disk_file )
- if not (os.path.exists(disk_file)):
- conf_file()
- else:
- move_file(disk_file, back_disk_file)
- conf_file()
-
- try :
- disk = get_or_bail(server, id=diskid, \
- pool_class=enumclass.Xen_DiskPool)
- mem = get_or_bail(server, id = memid, \
- pool_class=enumclass.Xen_MemoryPool)
- vir_network = net_list(server)
+ vir_network = net_list(server, virt)
if len(vir_network) > 0:
test_network = vir_network[0]
else:
bridgename = 'testbridge'
test_network = 'default-net'
net_xml, bridge = netxml(server, bridgename, test_network)
- ret = create_vnet(server, net_xml)
+ ret = create_vnet(server, net_xml, virt)
if not ret:
- Globals.logger.error("Failed to create the Virtual Network '%s'", \
- test_network)
+ logger.error("Failed to create Virtual Network '%s'",
+ test_network)
return SKIP
+
netid = "%s/%s" % ("NetworkPool", test_network)
- net = get_or_bail(server, id = netid, \
- pool_class=enumclass.Xen_NetworkPool)
- proc = get_or_bail(server, id = procid, \
- pool_class=enumclass.Xen_ProcessorPool)
+ npool = get_pool_info(virt, server, netid, poolname= "NetworkPool")
except Exception, detail:
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore()
- status = FAIL
- return status
+ logger.error("Exception: %s", detail)
+ return FAIL, dpool, npool, mpool, ppool
- instlist, cllist, rtype, rangelist = init_list(disk, mem, net, proc )
+ return PASS, dpool, npool, mpool, ppool
+def verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist):
+ for inst in assoc_info:
+ if inst.classname != cllist[loop]:
+ print_field_error("Classname", inst.classname, cllist[loop])
+ return FAIL
+ if inst['ResourceType'] != rtype[cllist[loop]]:
+ print_field_error("ResourceType", inst['ResourceType'],
+ rtype[cllist[loop]])
+ return FAIL
+
+ # The following properties have been removed in the patchset 571
+ # but is present in the rpm libvirt-cim and hence retained it.
+
+ if CIM_REV < libvirtcim_sdc_rasd_rev:
+ ppolicy = inst['PropertyPolicy']
+ if ppolicy != 0 and ppolicy != 1:
+ print_field_error("PropertyPolicy", inst['PropertyPolicy'],
+ ppolicy)
+ return FAIL
+
+ vrole = inst['ValueRole']
+ if vrole < 0 or vrole > 4:
+ print_field_error("ValueRole", inst['ValueRole'], vrole)
+ return FAIL
+
+ insid = inst['InstanceID']
+ vrange = rangelist[insid]
+ if vrange != inst['ValueRange']:
+ print_field_error("ValueRange", inst['ValueRange'], vrange)
+ return FAIL
+
+ return PASS
+
+def verify_sdc_with_ac(virt, server, dpool, npool, mpool, ppool):
+ loop = 0
+ instlist, cllist, rtype, rangelist = init_list(virt, dpool, npool, mpool,
+ ppool)
+ assoc_cname = get_typed_class(virt, "SettingsDefineCapabilities")
+ cn = get_typed_class(virt, "AllocationCapabilities")
for instid in sorted(instlist):
try:
- assoc_info = assoc.Associators(options.ip, \
- "Xen_SettingsDefineCapabilities",
- cn,
- InstanceID = instid)
+ assoc_info = assoc.Associators(server, assoc_cname, cn,
+ InstanceID = instid, virt=virt)
if len(assoc_info) != 4:
- Globals.logger.error("Xen_SettingsDefineCapabilities returned \
-%i ResourcePool objects instead 4", len(assoc_info))
+ logger.error("%s returned %i ResourcePool objects"
+ "instead 4", assoc_cname, len(assoc_info))
status = FAIL
break
- for i in range(len(assoc_info)):
- if assoc_info[i].classname != cllist[loop]:
- print_error(i, "Classname", assoc_info, cllist[loop])
- status = FAIL
- if assoc_info[i]['ResourceType'] != rtype[cllist[loop]]:
- print_error(i, "ResourceType", assoc_info, rtype[cllist[loop]])
- status = FAIL
- ppolicy = assoc_info[i]['PropertyPolicy']
- if ppolicy != 0 and ppolicy != 1:
- print_error(i, "PropertyPolicy", assoc_info, ppolicy)
- status = FAIL
- vrole = assoc_info[i]['ValueRole']
- if vrole < 0 or vrole > 4:
- print_error(i, "ValueRole", assoc_info, vrole)
- status = FAIL
- insid = assoc_info[i]['InstanceID']
- vrange = rangelist[insid]
- if vrange != assoc_info[i]['ValueRange']:
- print_error(i, "ValueRange", assoc_info, vrange)
- status = FAIL
- if status != 0:
- break
- if status != 0:
+ status = verify_rasd_fields(loop, assoc_info, cllist, rtype,
+ rangelist)
+ if status != PASS:
break
else:
loop = loop + 1
+
except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
- 'Xen_SettingsDefineCapabilities')
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore()
+ logger.error(CIM_ERROR_ASSOCIATORS, assoc_cname)
+ logger.error("Exception: %s", detail)
status = FAIL
- clean_up_restore()
+ return status
+
+@do_main(platform_sup)
+def main():
+ options = main.options
+
+ server = options.ip
+ virt = options.virt
+
+ # Verify DiskPool on machine
+ status = create_diskpool_file()
+ if status != PASS:
+ return status
+
+ status, dpool, npool, mpool, ppool = get_pool_details(virt, server)
+ if status != PASS or dpool.InstanceID == None or mpool.InstanceID == None \
+ or npool.InstanceID == None or ppool.InstanceID == None:
+ cleanup_restore()
+ return FAIL
+
+ status = verify_sdc_with_ac(virt, server, dpool, npool, mpool, ppool)
+ cleanup_restore()
return status
if __name__ == "__main__":
16 years, 6 months
KVM test report on Fedora 9 (05/12)
by Guo Lian Yun
Distro: Fedora 9 Beta
Kernel: 2.6.25-0.121.rc5.git4.fc9
Libvirt: 0.4.1-7.fc9
CIMOM: 2.7.0-6.fc9
PyWBEM: 0.6-1
libcmpiutil: 0.3-1.fc9
libvirt-cim: 0.3-4.fc9
cimtest: changeset-135
=================== FAIL ======================================
ComputerSystemIndication - 01_created_indication.py: FAIL
ElementConforms - 02_reverse.py: FAIL
NetworkPort - 03_user_netport.py: FAIL
SettingsDefine - 02_reverse.py: FAIL
VirtualSystemManagementService - 06_addresource.py: FAIL
========FULL CIMTEST REPORT=PASS(96)=FAIL(5)=SKIP(19)=XFAIL(9)===
AllocationCapabilities - 01_enum.py: PASS
AllocationCapabilities - 02_alloccap_gi_errs.py: PASS
ComputerSystem - 01_enum.py: PASS
ComputerSystem - 02_nosystems.py: PASS
ComputerSystem - 03_defineVS.py: PASS
ComputerSystem - 04_defineStartVS.py: PASS
ComputerSystem - 05_activate_defined_start.py: XFAIL Bug: 85769
ComputerSystem - 06_paused_active_suspend.py: XFAIL Bug: 85769
ComputerSystem - 22_define_suspend.py: PASS
ComputerSystem - 23_suspend_suspend.py: SKIP
ComputerSystem - 27_define_suspend_errs.py: SKIP
ComputerSystem - 32_start_reboot.py: SKIP
ComputerSystem - 33_suspend_reboot.py: SKIP
ComputerSystem - 35_start_reset.py: SKIP
ComputerSystem - 40_RSC_start.py: XFAIL Bug: 91410
InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Domain Operation Failed
Bug:<91410>
ComputerSystem - 41_cs_to_settingdefinestate.py: SKIP
ComputerSystem - 42_cs_gi_errs.py: PASS
ElementAllocatedFromPool - 01_forward.py: SKIP
ElementAllocatedFromPool - 02_reverse.py: SKIP
ElementAllocatedFromPool - 03_reverse_errs.py: XFAIL Bug: 88651
ElementAllocatedFromPool - 04_forward_errs.py: XFAIL Bug: 88651
ElementCapabilities - 01_forward.py: PASS
ElementCapabilities - 02_reverse.py: PASS
ElementCapabilities - 03_forward_errs.py: PASS
ElementCapabilities - 04_reverse_errs.py: PASS
ElementCapabilities - 05_hostsystem_cap.py: PASS
ElementConforms - 01_forward.py: PASS
ElementConforms - 02_reverse.py: FAIL
ElementConforms - 03_ectp_fwd_errs.py: XFAIL Bug: 92642
Bug:<92642>
Bug:<92642>
Bug:<92642>
ElementConforms - 04_ectp_rev_errs.py: XFAIL Bug: 92642
Bug:<92642>
Bug:<92642>
Bug:<92642>
Bug:<92642>
Bug:<92642>
Bug:<92642>
Bug:<92642>
Bug:<92642>
ElementSettingData - 01_forward.py: PASS
ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS
EnabledLogicalElementCapabilities - 01_enum.py: PASS
EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS
HostSystem - 01_enum.py: PASS
HostSystem - 02_hostsystem_to_rasd.py: PASS
HostSystem - 03_hs_to_settdefcap.py: PASS
HostSystem - 04_hs_to_EAPF.py: SKIP
HostSystem - 05_hs_gi_errs.py: PASS
HostSystem - 06_hs_to_vsms.py: PASS
HostedDependency - 01_forward.py: PASS
HostedDependency - 02_reverse.py: PASS
HostedDependency - 03_enabledstate.py: PASS
HostedDependency - 04_reverse_errs.py: PASS
HostedResourcePool - 01_forward.py: PASS
HostedResourcePool - 02_reverse.py: PASS
HostedResourcePool - 03_forward_errs.py: PASS
HostedResourcePool - 04_reverse_errs.py: PASS
HostedService - 01_forward.py: PASS
HostedService - 02_reverse.py: PASS
HostedService - 03_forward_errs.py: PASS
HostedService - 04_reverse_errs.py: PASS
LogicalDisk - 01_disk.py: PASS
LogicalDisk - 02_nodevs.py: PASS
LogicalDisk - 03_ld_gi_errs.py: PASS
Memory - 01_memory.py: PASS
Memory - 02_defgetmem.py: PASS
Memory - 03_mem_gi_errs.py: PASS
NetworkPort - 01_netport.py: PASS
NetworkPort - 02_np_gi_errors.py: PASS
NetworkPort - 03_user_netport.py: FAIL
Processor - 01_processor.py: PASS
Processor - 02_definesys_get_procs.py: PASS
Processor - 03_proc_gi_errs.py: PASS
Profile - 01_enum.py: PASS
Profile - 02_profile_to_elec.py: SKIP
Profile - 03_rprofile_gi_errs.py: SKIP
RASD - 01_verify_rasd_fields.py: PASS
RASD - 02_enum.py: PASS
RASD - 03_rasd_errs.py: PASS
ReferencedProfile - 01_verify_refprof.py: SKIP
ReferencedProfile - 02_refprofile_errs.py: SKIP
ResourceAllocationFromPool - 01_forward.py: PASS
ResourceAllocationFromPool - 02_reverse.py: PASS
ResourceAllocationFromPool - 03_forward_errs.py: PASS
ResourceAllocationFromPool - 04_reverse_errs.py: PASS
ResourceAllocationFromPool - 05_RAPF_err.py: PASS
ResourcePool - 01_enum.py: PASS
ResourcePool - 02_rp_gi_errors.py: PASS
ResourcePoolConfigurationCapabilities - 01_enum.py: PASS
ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS
ResourcePoolConfigurationService - 01_enum.py: PASS
ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS
ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS
ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: PASS
ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: XFAIL
Bug: 92173
InvokeMethod(AddResourcesToResourcePool): CIM_ERR_FAILED: Unknown Method
Bug:<92173>
ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py:
PASS
ResourcePoolConfigurationService - 07_DeleteResourcePool.py: PASS
SettingsDefine - 01_forward.py: PASS
SettingsDefine - 02_reverse.py: FAIL
SettingsDefine - 03_sds_fwd_errs.py: PASS
SettingsDefine - 04_sds_rev_errs.py: PASS
SettingsDefineCapabilities - 01_forward.py: SKIP
SettingsDefineCapabilities - 03_forward_errs.py: PASS
SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS
SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS
SystemDevice - 01_forward.py: PASS
SystemDevice - 02_reverse.py: PASS
SystemDevice - 03_fwderrs.py: PASS
VSSD - 01_enum.py: PASS
VSSD - 02_bootldr.py: SKIP
VSSD - 03_vssd_gi_errs.py: PASS
VSSD - 04_vssd_to_rasd.py: PASS
VirtualSystemManagementCapabilities - 01_enum.py: PASS
VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS
VirtualSystemManagementService - 01_definesystem_name.py: PASS
VirtualSystemManagementService - 02_destroysystem.py: PASS
VirtualSystemManagementService - 03_definesystem_ess.py: PASS
VirtualSystemManagementService - 04_definesystem_ers.py: PASS
VirtualSystemManagementService - 05_destroysystem_neg.py: PASS
VirtualSystemManagementService - 06_addresource.py: FAIL
InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Failed to create domain
VirtualSystemManagementService - 07_addresource_neg.py: PASS
VirtualSystemManagementService - 08_modifyresource.py: XFAIL Bug: 90853
InvokeMethod(ModifyResourceSettings): CIM_ERR_FAILED: Failed to create
domain
Bug:<90853>
VirtualSystemMigrationCapabilities - 01_enum.py: PASS
VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS
VirtualSystemMigrationService - 01_migratable_host.py: SKIP
VirtualSystemMigrationService - 02_host_migrate_type.py: SKIP
VirtualSystemMigrationService - 05_migratable_host_errs.py: SKIP
VirtualSystemMigrationSettingData - 01_enum.py: PASS
VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS
VirtualSystemSettingDataComponent - 01_forward.py: SKIP
VirtualSystemSettingDataComponent - 02_reverse.py: PASS
VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS
VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS
VirtualSystemSnapshotService - 01_enum.py: PASS
VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS
VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS
VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py:
PASS
Best,
Regards
Daisy (运国莲)
IBM STG Greater China Development Lab, Shanghai
E-mail: yunguol(a)cn.ibm.com
TEL: (86)-21-60922144
Building 10, 399 Ke Yuan Rd, Pudong Shanghai, 201203
16 years, 6 months
[PATCH] Add missing registration for LXC_DiskRASD
by Dan Smith
# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1210268930 25200
# Node ID 6cbf201e90c5fd571e988d7be3e64a4bef4cbac4
# Parent eece298b820b031be45dc5aca525a43a7bb029dd
Add missing registration for LXC_DiskRASD
Signed-off-by: Dan Smith <danms(a)us.ibm.com>
diff -r eece298b820b -r 6cbf201e90c5 schema/ResourceAllocationSettingData.registration
--- a/schema/ResourceAllocationSettingData.registration Thu May 08 10:21:03 2008 -0700
+++ b/schema/ResourceAllocationSettingData.registration Thu May 08 10:48:50 2008 -0700
@@ -9,3 +9,4 @@ KVM_ProcResourceAllocationSettingData ro
KVM_ProcResourceAllocationSettingData root/virt Virt_RASD Virt_RASD instance
KVM_MemResourceAllocationSettingData root/virt Virt_RASD Virt_RASD instance
LXC_MemResourceAllocationSettingData root/virt Virt_RASD Virt_RASD instance
+LXC_DiskResourceAllocationSettingData root/virt Virt_RASD Virt_RASD instance
16 years, 6 months
[PATCH] Temporarily fall through on processor devices in DefineSystem()
by Dan Smith
# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1210267263 25200
# Node ID eece298b820b031be45dc5aca525a43a7bb029dd
# Parent 7d8223bceaf3206b13443db42e95d9e505ce5ff7
Temporarily fall through on processor devices in DefineSystem()
This is breaking tests just because it refuses a domain with processors
defined. This will be replaced with Jay's processor support when it's
ready.
Signed-off-by: Dan Smith <danms(a)us.ibm.com>
diff -r 7d8223bceaf3 -r eece298b820b src/Virt_VirtualSystemManagementService.c
--- a/src/Virt_VirtualSystemManagementService.c Thu May 08 10:10:49 2008 -0700
+++ b/src/Virt_VirtualSystemManagementService.c Thu May 08 10:21:03 2008 -0700
@@ -457,6 +457,8 @@ static const char *_sysvirt_rasd_to_vdev
return net_rasd_to_vdev(inst, dev);
} else if (type == CIM_RES_TYPE_MEM) {
return mem_rasd_to_vdev(inst, dev);
+ } else if (type == CIM_RES_TYPE_PROC) {
+ return NULL; /* FIXME: replace when processor is done */
}
return "Resource type not supported on this platform";
16 years, 6 months
[PATCH] Make BlockSize * NumberOfBlocks result in the correct number of bytes
by Dan Smith
# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1210266649 25200
# Node ID 7d8223bceaf3206b13443db42e95d9e505ce5ff7
# Parent c2465f3f0a2472a45f90707ce9653ad2c59bb50b
Make BlockSize * NumberOfBlocks result in the correct number of bytes
Currently it adds up to kilobytes, so shift the size we get from libvirt
appropriately before doing the calculation.
Signed-off-by: Dan Smith <danms(a)us.ibm.com>
diff -r c2465f3f0a24 -r 7d8223bceaf3 src/Virt_Device.c
--- a/src/Virt_Device.c Thu May 08 09:36:30 2008 -0700
+++ b/src/Virt_Device.c Thu May 08 10:10:49 2008 -0700
@@ -141,8 +141,8 @@ static int mem_set_size(CMPIInstance *in
{
uint64_t consumableblocks, numberofblocks;
- consumableblocks = dev->size/XEN_MEM_BLOCKSIZE;
- numberofblocks = dev->maxsize/XEN_MEM_BLOCKSIZE;
+ consumableblocks = (dev->size << 10) / XEN_MEM_BLOCKSIZE;
+ numberofblocks = (dev->maxsize << 10) / XEN_MEM_BLOCKSIZE;
CMSetProperty(instance, "BlockSize",
(CMPIValue *)&XEN_MEM_BLOCKSIZE, CMPI_uint64);
16 years, 6 months
[PATCH] Change VirtualQuantity to uint64 in all sample RASDs
by Dan Smith
# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1210350149 25200
# Node ID 874639813fb34a80455efc4c886a52672734943d
# Parent 1eef845c77d096d65b256384423b93cda19fb8db
Change VirtualQuantity to uint64 in all sample RASDs
Changes:
- Remove unneeded additional variable in disk case
Signed-off-by: Dan Smith <danms(a)us.ibm.com>
diff -r 1eef845c77d0 -r 874639813fb3 src/Virt_SettingsDefineCapabilities.c
--- a/src/Virt_SettingsDefineCapabilities.c Thu May 08 09:23:56 2008 -0700
+++ b/src/Virt_SettingsDefineCapabilities.c Fri May 09 09:22:29 2008 -0700
@@ -403,13 +403,13 @@ static struct sdc_rasd_prop *proc_min(co
CMPIStatus *s)
{
bool ret;
- uint16_t num_procs = 1;
+ uint64_t num_procs = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Minimum", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -428,7 +428,7 @@ static struct sdc_rasd_prop *proc_max(co
{
bool ret;
virConnectPtr conn;
- uint16_t num_procs = 0;
+ uint64_t num_procs = 0;
struct sdc_rasd_prop *rasd = NULL;
CU_DEBUG("In proc_max()");
@@ -447,7 +447,7 @@ static struct sdc_rasd_prop *proc_max(co
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Maximum", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -466,13 +466,13 @@ static struct sdc_rasd_prop *proc_def(co
CMPIStatus *s)
{
bool ret;
- uint16_t num_procs = 1;
+ uint64_t num_procs = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Default", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -490,13 +490,13 @@ static struct sdc_rasd_prop *proc_inc(co
CMPIStatus *s)
{
bool ret;
- uint16_t num_procs = 1;
+ uint64_t num_procs = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Increment", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -514,38 +514,38 @@ static struct sdc_rasd_prop *net_min(con
CMPIStatus *s)
{
bool ret;
- uint16_t num_nics = 0;
+ uint64_t num_nics = 0;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Minimum", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
- PROP_END
- };
-
- ret = dup_rasd_prop_list(tmp, &rasd);
- if (!ret) {
- cu_statusf(_BROKER, s,
- CMPI_RC_ERR_FAILED,
- "Could not copy RASD");
- }
-
- return rasd;
-}
-
-static uint16_t net_max_kvm(const CMPIObjectPath *ref,
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
+ PROP_END
+ };
+
+ ret = dup_rasd_prop_list(tmp, &rasd);
+ if (!ret) {
+ cu_statusf(_BROKER, s,
+ CMPI_RC_ERR_FAILED,
+ "Could not copy RASD");
+ }
+
+ return rasd;
+}
+
+static uint64_t net_max_kvm(const CMPIObjectPath *ref,
CMPIStatus *s)
{
/* This appears to not require anything dynamic. */
return KVM_MAX_NICS;
}
-static uint16_t net_max_xen(const CMPIObjectPath *ref,
+static uint64_t net_max_xen(const CMPIObjectPath *ref,
CMPIStatus *s)
{
int rc;
virConnectPtr conn;
unsigned long version;
- uint16_t num_nics = -1;
+ uint64_t num_nics = -1;
conn = connect_by_classname(_BROKER, CLASSNAME(ref), s);
if (s->rc != CMPI_RC_OK) {
@@ -579,7 +579,7 @@ static struct sdc_rasd_prop *net_max(con
{
bool ret;
char *prefix;
- uint16_t num_nics;
+ uint64_t num_nics;
struct sdc_rasd_prop *rasd = NULL;
prefix = class_prefix_name(CLASSNAME(ref));
@@ -611,7 +611,7 @@ static struct sdc_rasd_prop *net_max(con
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Maximum", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
PROP_END
};
@@ -630,11 +630,11 @@ static struct sdc_rasd_prop *net_def(con
CMPIStatus *s)
{
bool ret;
- uint16_t num_nics = 1;
+ uint64_t num_nics = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Default", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
PROP_END
};
@@ -652,12 +652,12 @@ static struct sdc_rasd_prop *net_inc(con
CMPIStatus *s)
{
bool ret;
- uint16_t num_nics = 1;
+ uint64_t num_nics = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Increment", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
PROP_END
};
@@ -675,13 +675,13 @@ static struct sdc_rasd_prop *disk_min(co
CMPIStatus *s)
{
bool ret;
- uint16_t disk_size = SDC_DISK_MIN;
+ uint64_t disk_size = SDC_DISK_MIN;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Minimum", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint64},
PROP_END
};
@@ -701,8 +701,7 @@ static struct sdc_rasd_prop *disk_max(co
bool ret;
const char *inst_id;
CMPIrc prop_ret;
- uint16_t free_space;
- uint64_t free_64;
+ uint64_t free_space;
virConnectPtr conn;
CMPIInstance *pool_inst;
struct sdc_rasd_prop *rasd = NULL;
@@ -728,20 +727,19 @@ static struct sdc_rasd_prop *disk_max(co
if (s->rc != CMPI_RC_OK)
goto out;
- prop_ret = cu_get_u64_prop(pool_inst, "Capacity", &free_64);
+ prop_ret = cu_get_u64_prop(pool_inst, "Capacity", &free_space);
if (prop_ret != CMPI_RC_OK) {
cu_statusf(_BROKER, s,
CMPI_RC_ERR_FAILED,
"Could not get capacity from instance");
goto out;
}
- CU_DEBUG("Got capacity from pool_inst: %lld", free_64);
-
- free_space = (uint16_t)free_64;
+ CU_DEBUG("Got capacity from pool_inst: %lld", free_space);
+
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Maximum", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&free_space, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&free_space, CMPI_uint64},
PROP_END
};
@@ -760,13 +758,13 @@ static struct sdc_rasd_prop *disk_def(co
CMPIStatus *s)
{
bool ret;
- uint16_t disk_size = SDC_DISK_DEF;
+ uint64_t disk_size = SDC_DISK_DEF;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Default", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint64},
PROP_END
};
@@ -784,13 +782,13 @@ static struct sdc_rasd_prop *disk_inc(co
CMPIStatus *s)
{
bool ret;
- uint16_t disk_size = SDC_DISK_INC;
+ uint64_t disk_size = SDC_DISK_INC;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Increment", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint64},
PROP_END
};
16 years, 6 months
[PATCH] Change VirtualQuantity to uint64 in all sample RASDs
by Dan Smith
# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1210264590 25200
# Node ID c2465f3f0a2472a45f90707ce9653ad2c59bb50b
# Parent 1eef845c77d096d65b256384423b93cda19fb8db
Change VirtualQuantity to uint64 in all sample RASDs
Signed-off-by: Dan Smith <danms(a)us.ibm.com>
diff -r 1eef845c77d0 -r c2465f3f0a24 src/Virt_SettingsDefineCapabilities.c
--- a/src/Virt_SettingsDefineCapabilities.c Thu May 08 09:23:56 2008 -0700
+++ b/src/Virt_SettingsDefineCapabilities.c Thu May 08 09:36:30 2008 -0700
@@ -403,13 +403,13 @@ static struct sdc_rasd_prop *proc_min(co
CMPIStatus *s)
{
bool ret;
- uint16_t num_procs = 1;
+ uint64_t num_procs = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Minimum", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -428,7 +428,7 @@ static struct sdc_rasd_prop *proc_max(co
{
bool ret;
virConnectPtr conn;
- uint16_t num_procs = 0;
+ uint64_t num_procs = 0;
struct sdc_rasd_prop *rasd = NULL;
CU_DEBUG("In proc_max()");
@@ -447,7 +447,7 @@ static struct sdc_rasd_prop *proc_max(co
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Maximum", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -466,13 +466,13 @@ static struct sdc_rasd_prop *proc_def(co
CMPIStatus *s)
{
bool ret;
- uint16_t num_procs = 1;
+ uint64_t num_procs = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Default", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -490,13 +490,13 @@ static struct sdc_rasd_prop *proc_inc(co
CMPIStatus *s)
{
bool ret;
- uint16_t num_procs = 1;
+ uint64_t num_procs = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Increment", CMPI_chars},
{"AllocationUnits", (CMPIValue *)"Processors", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_procs, CMPI_uint64},
PROP_END
};
@@ -514,38 +514,38 @@ static struct sdc_rasd_prop *net_min(con
CMPIStatus *s)
{
bool ret;
- uint16_t num_nics = 0;
+ uint64_t num_nics = 0;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Minimum", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
- PROP_END
- };
-
- ret = dup_rasd_prop_list(tmp, &rasd);
- if (!ret) {
- cu_statusf(_BROKER, s,
- CMPI_RC_ERR_FAILED,
- "Could not copy RASD");
- }
-
- return rasd;
-}
-
-static uint16_t net_max_kvm(const CMPIObjectPath *ref,
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
+ PROP_END
+ };
+
+ ret = dup_rasd_prop_list(tmp, &rasd);
+ if (!ret) {
+ cu_statusf(_BROKER, s,
+ CMPI_RC_ERR_FAILED,
+ "Could not copy RASD");
+ }
+
+ return rasd;
+}
+
+static uint64_t net_max_kvm(const CMPIObjectPath *ref,
CMPIStatus *s)
{
/* This appears to not require anything dynamic. */
return KVM_MAX_NICS;
}
-static uint16_t net_max_xen(const CMPIObjectPath *ref,
+static uint64_t net_max_xen(const CMPIObjectPath *ref,
CMPIStatus *s)
{
int rc;
virConnectPtr conn;
unsigned long version;
- uint16_t num_nics = -1;
+ uint64_t num_nics = -1;
conn = connect_by_classname(_BROKER, CLASSNAME(ref), s);
if (s->rc != CMPI_RC_OK) {
@@ -579,7 +579,7 @@ static struct sdc_rasd_prop *net_max(con
{
bool ret;
char *prefix;
- uint16_t num_nics;
+ uint64_t num_nics;
struct sdc_rasd_prop *rasd = NULL;
prefix = class_prefix_name(CLASSNAME(ref));
@@ -611,7 +611,7 @@ static struct sdc_rasd_prop *net_max(con
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Maximum", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
PROP_END
};
@@ -630,11 +630,11 @@ static struct sdc_rasd_prop *net_def(con
CMPIStatus *s)
{
bool ret;
- uint16_t num_nics = 1;
+ uint64_t num_nics = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Default", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
PROP_END
};
@@ -652,12 +652,12 @@ static struct sdc_rasd_prop *net_inc(con
CMPIStatus *s)
{
bool ret;
- uint16_t num_nics = 1;
+ uint64_t num_nics = 1;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Increment", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&num_nics, CMPI_uint64},
PROP_END
};
@@ -675,13 +675,13 @@ static struct sdc_rasd_prop *disk_min(co
CMPIStatus *s)
{
bool ret;
- uint16_t disk_size = SDC_DISK_MIN;
+ uint64_t disk_size = SDC_DISK_MIN;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Minimum", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint64},
PROP_END
};
@@ -701,7 +701,7 @@ static struct sdc_rasd_prop *disk_max(co
bool ret;
const char *inst_id;
CMPIrc prop_ret;
- uint16_t free_space;
+ uint64_t free_space;
uint64_t free_64;
virConnectPtr conn;
CMPIInstance *pool_inst;
@@ -737,11 +737,11 @@ static struct sdc_rasd_prop *disk_max(co
}
CU_DEBUG("Got capacity from pool_inst: %lld", free_64);
- free_space = (uint16_t)free_64;
+ free_space = free_64;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Maximum", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&free_space, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&free_space, CMPI_uint64},
PROP_END
};
@@ -760,13 +760,13 @@ static struct sdc_rasd_prop *disk_def(co
CMPIStatus *s)
{
bool ret;
- uint16_t disk_size = SDC_DISK_DEF;
+ uint64_t disk_size = SDC_DISK_DEF;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Default", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint64},
PROP_END
};
@@ -784,13 +784,13 @@ static struct sdc_rasd_prop *disk_inc(co
CMPIStatus *s)
{
bool ret;
- uint16_t disk_size = SDC_DISK_INC;
+ uint64_t disk_size = SDC_DISK_INC;
struct sdc_rasd_prop *rasd = NULL;
struct sdc_rasd_prop tmp[] = {
{"InstanceID", (CMPIValue *)"Increment", CMPI_chars},
{"AllocationQuantity", (CMPIValue *)"MegaBytes", CMPI_chars},
- {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint16},
+ {"VirtualQuantity", (CMPIValue *)&disk_size, CMPI_uint64},
PROP_END
};
16 years, 6 months
[PATCH] Add PoolID into DevicePool instances
by Dan Smith
# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1210263836 25200
# Node ID 1eef845c77d096d65b256384423b93cda19fb8db
# Parent a46dd9e2e4efbea37e7e5d2cf8c285e1a56bd726
Add PoolID into DevicePool instances
Changes:
- Consolidate the setting of common ResourcePool attributes
Signed-off-by: Dan Smith <danms(a)us.ibm.com>
diff -r a46dd9e2e4ef -r 1eef845c77d0 src/Virt_DevicePool.c
--- a/src/Virt_DevicePool.c Thu May 08 08:25:23 2008 -0700
+++ b/src/Virt_DevicePool.c Thu May 08 09:23:56 2008 -0700
@@ -561,13 +561,28 @@ static bool procpool_set_total(CMPIInsta
return procs != 0;
}
-static bool set_units(CMPIInstance *inst,
- const char *units)
-{
- CMSetProperty(inst, "AllocationUnits",
- (CMPIValue *)units, CMPI_chars);
-
- return true;
+static void set_params(CMPIInstance *inst,
+ uint16_t type,
+ const char *id,
+ const char *units,
+ const char *caption)
+{
+ CMSetProperty(inst, "InstanceID",
+ (CMPIValue *)id, CMPI_chars);
+
+ CMSetProperty(inst, "PoolID",
+ (CMPIValue *)id, CMPI_chars);
+
+ CMSetProperty(inst, "ResourceType",
+ (CMPIValue *)&type, CMPI_uint16);
+
+ if (units != NULL)
+ CMSetProperty(inst, "AllocationUnits",
+ (CMPIValue *)units, CMPI_chars);
+
+ if (caption != NULL)
+ CMSetProperty(inst, "Caption",
+ (CMPIValue *)caption, CMPI_chars);
}
static CMPIStatus mempool_instance(virConnectPtr conn,
@@ -577,7 +592,6 @@ static CMPIStatus mempool_instance(virCo
const CMPIBroker *broker)
{
const char *id = "MemoryPool/0";
- uint16_t type = CIM_RES_TYPE_MEM;
CMPIInstance *inst;
CMPIStatus s = {CMPI_RC_OK, NULL};
@@ -595,13 +609,8 @@ static CMPIStatus mempool_instance(virCo
mempool_set_total(inst, conn);
mempool_set_reserved(inst, conn);
- set_units(inst, "KiloBytes");
-
- CMSetProperty(inst, "InstanceID",
- (CMPIValue *)id, CMPI_chars);
-
- CMSetProperty(inst, "ResourceType",
- (CMPIValue *)&type, CMPI_uint16);
+
+ set_params(inst, CIM_RES_TYPE_MEM, id, "KiloBytes", NULL);
inst_list_add(list, inst);
@@ -615,7 +624,6 @@ static CMPIStatus procpool_instance(virC
const CMPIBroker *broker)
{
const char *id = "ProcessorPool/0";
- uint16_t type = CIM_RES_TYPE_PROC;
CMPIInstance *inst;
CMPIStatus s = {CMPI_RC_OK, NULL};
@@ -632,13 +640,8 @@ static CMPIStatus procpool_instance(virC
ns);
procpool_set_total(inst, conn);
- set_units(inst, "Processors");
-
- CMSetProperty(inst, "InstanceID",
- (CMPIValue *)id, CMPI_chars);
-
- CMSetProperty(inst, "ResourceType",
- (CMPIValue *)&type, CMPI_uint16);
+
+ set_params(inst, CIM_RES_TYPE_PROC, id, "Processors", NULL);
inst_list_add(list, inst);
@@ -653,9 +656,9 @@ static CMPIStatus _netpool_for_network(s
const CMPIBroker *broker)
{
CMPIStatus s = {CMPI_RC_OK, NULL};
- char *str = NULL;
+ char *id = NULL;
+ char *cap = NULL;
char *bridge = NULL;
- uint16_t type = CIM_RES_TYPE_NET;
CMPIInstance *inst;
virNetworkPtr network = NULL;
@@ -681,36 +684,27 @@ static CMPIStatus _netpool_for_network(s
goto out;
}
- if (asprintf(&str, "NetworkPool/%s", netname) == -1) {
+ if (asprintf(&id, "NetworkPool/%s", netname) == -1) {
cu_statusf(broker, &s,
CMPI_RC_ERR_FAILED,
"");
goto out;
}
- CMSetProperty(inst, "InstanceID",
- (CMPIValue *)str, CMPI_chars);
- free(str);
-
bridge = virNetworkGetBridgeName(network);
- if (asprintf(&str, "Bridge: %s", bridge) == -1) {
+ if (asprintf(&cap, "Bridge: %s", bridge) == -1) {
cu_statusf(broker, &s,
CMPI_RC_ERR_FAILED,
"");
goto out;
}
- CMSetProperty(inst, "Caption",
- (CMPIValue *)str, CMPI_chars);
- free(str);
+ set_params(inst, CIM_RES_TYPE_NET, id, NULL, cap);
+ free(id);
+ free(cap);
free(bridge);
- CMSetProperty(inst, "ResourceType",
- (CMPIValue *)&type, CMPI_uint16);
-
-
inst_list_add(list, inst);
-
out:
virNetworkFree(network);
@@ -778,24 +772,13 @@ static CMPIInstance *diskpool_from_path(
{
CMPIInstance *inst;
char *poolid = NULL;
- const uint16_t type = CIM_RES_TYPE_DISK;
inst = get_typed_instance(broker, refcn, "DiskPool", ns);
if (asprintf(&poolid, "DiskPool/%s", pool->tag) == -1)
return NULL;
- CMSetProperty(inst, "InstanceID",
- (CMPIValue *)poolid, CMPI_chars);
-
- CMSetProperty(inst, "ResourceType",
- (CMPIValue *)&type, CMPI_uint16);
-
- CMSetProperty(inst, "AllocationUnits",
- (CMPIValue *)"Megabytes", CMPI_chars);
-
- CMSetProperty(inst, "Caption",
- (CMPIValue *)pool->tag, CMPI_chars);
+ set_params(inst, CIM_RES_TYPE_DISK, poolid, "Megabytes", pool->tag);
if (!diskpool_set_capacity(conn, inst, pool))
CU_DEBUG("Failed to set capacity for disk pool: %s",
16 years, 6 months
[PATCH] [TEST] #2 Updating 01_forward.py of SDC to support XenFV, KVM
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1210164893 25200
# Node ID adcbedf23f1d58e809fb4d91d6c40f83f21b776c
# Parent 88d20825a9ba6dd3d070e02f698a7846b7e3a2c8
[TEST] #2 Updating 01_forward.py of SDC to support XenFV, KVM.
1) To make use of the lib fn conf_file(), cleanup_restore() and create_diskpool_file().
2) To use the lib fn print_field_error.
3) To retain the PropertyPolicy, ValueRole, ValueRange depending on the revision no.
4) Tested on rpm based libvirt-cim KVM machine, latest libvirt-cim for KVM and Xen, XenFV.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 88d20825a9ba -r adcbedf23f1d suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
--- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Wed May 07 05:34:23 2008 -0700
+++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Wed May 07 05:54:53 2008 -0700
@@ -55,135 +55,86 @@ import sys
import sys
import os
from distutils.file_util import move_file
-from VirtLib import utils
from XenKvmLib import assoc
from XenKvmLib import enumclass
-from CimTest import Globals
-from CimTest.Globals import do_main
from XenKvmLib.test_xml import netxml
from XenKvmLib.test_doms import create_vnet
from VirtLib.live import net_list
from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from CimTest.Globals import do_main, platform_sup, logger, \
+CIM_ERROR_GETINSTANCE, CIM_ERROR_ASSOCIATORS
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.common_util import cleanup_restore, test_dpath, \
+create_diskpool_file
+from XenKvmLib.common_util import print_field_error
+from XenKvmLib.const import CIM_REV
-sup_types = ['Xen']
-
-status = PASS
-test_dpath = "foo"
-disk_file = '/tmp/diskpool.conf'
-back_disk_file = disk_file + "." + "SSDC_01_forward"
diskid = "%s/%s" % ("DiskPool", test_dpath)
memid = "%s/%s" % ("MemoryPool", 0)
procid = "%s/%s" % ("ProcessorPool", 0)
+libvirtcim_rev = 571
-def conf_file():
+
+def get_or_bail(virt, ip, id, pool_class):
"""
- Creating diskpool.conf file.
- """
- try:
- f = open(disk_file, 'w')
- f.write('%s %s' % (test_dpath, '/'))
- f.close()
- except Exception,detail:
- Globals.logger.error("Exception: %s", detail)
- status = SKIP
- sys.exit(status)
-
-def clean_up_restore():
- """
- Restoring back the original diskpool.conf
- file.
- """
- try:
- if os.path.exists(back_disk_file):
- os.remove(disk_file)
- move_file(back_disk_file, disk_file)
- except Exception, detail:
- Globals.logger.error("Exception: %s", detail)
- status = SKIP
- sys.exit(status)
-
-
-def get_or_bail(ip, id, pool_class):
- """
- Getinstance for the CLass and return instance on success, otherwise
+ Getinstance for the Class and return instance on success, otherwise
exit after cleanup_restore .
"""
key_list = { 'InstanceID' : id }
-
try:
- instance = enumclass.getInstance(ip, pool_class, key_list)
+ instance = enumclass.getInstance(ip, pool_class, key_list, virt)
except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, '%s' % pool_class)
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore()
- status = FAIL
- sys.exit(status)
+ logger.error(CIM_ERROR_GETINSTANCE, '%s' % pool_class)
+ logger.error("Exception: %s", detail)
+ cleanup_restore()
+ sys.exit(FAIL)
return instance
-def init_list(disk, mem, net, proc):
+def init_list(virt, dpool, npool, mpool, ppool):
"""
Creating the lists that will be used for comparisons.
"""
instlist = [
- disk.InstanceID, \
- mem.InstanceID, \
- net.InstanceID, \
- proc.InstanceID
- ]
+ dpool.InstanceID,
+ mpool.InstanceID,
+ npool.InstanceID,
+ ppool.InstanceID
+ ]
cllist = [
- "Xen_DiskResourceAllocationSettingData", \
- "Xen_MemResourceAllocationSettingData", \
- "Xen_NetResourceAllocationSettingData", \
- "Xen_ProcResourceAllocationSettingData"
+ get_typed_class(virt, "DiskResourceAllocationSettingData"),
+ get_typed_class(virt, "MemResourceAllocationSettingData"),
+ get_typed_class(virt, "NetResourceAllocationSettingData"),
+ get_typed_class(virt, "ProcResourceAllocationSettingData")
]
rtype = {
- "Xen_DiskResourceAllocationSettingData" : 17, \
- "Xen_MemResourceAllocationSettingData" : 4, \
- "Xen_NetResourceAllocationSettingData" : 10, \
- "Xen_ProcResourceAllocationSettingData" : 3
+ get_typed_class(virt, "DiskResourceAllocationSettingData") : 17,
+ get_typed_class(virt, "MemResourceAllocationSettingData") : 4,
+ get_typed_class(virt, "NetResourceAllocationSettingData") : 10,
+ get_typed_class(virt, "ProcResourceAllocationSettingData") : 3
}
rangelist = {
- "Default" : 0, \
- "Minimum" : 1, \
- "Maximum" : 2, \
+ "Default" : 0,
+ "Minimum" : 1,
+ "Maximum" : 2,
"Increment" : 3
}
return instlist, cllist, rtype, rangelist
+def get_pool_info(virt, server, devid, poolname=""):
+ pool_cname = get_typed_class(virt, poolname)
+ pool_cn = eval("enumclass." + pool_cname)
+ return get_or_bail(virt, server, id=devid, pool_class=pool_cn)
-def print_error(index, fieldname, assoc_info, exp_value):
- ret_value = assoc_info[index][fieldname]
- Globals.logger.error("%s Mismatch", fieldname)
- Globals.logger.error("Returned %s instead of %s", ret_value, exp_value)
+def get_pool_details(virt, server):
+ dpool = npool = mpool = ppool = None
+ try :
+ dpool = get_pool_info(virt, server, diskid, poolname="DiskPool")
+ mpool = get_pool_info(virt, server, memid, poolname= "MemoryPool")
+ ppool = get_pool_info(virt, server, procid, poolname= "ProcessorPool")
-
-@do_main(sup_types)
-def main():
- options = main.options
- global status
-
- cn = 'Xen_AllocationCapabilities'
- loop = 0
- server = options.ip
-
- # Taking care of already existing diskconf file
- # Creating diskpool.conf if it does not exist
- # Otherwise backing up the prev file and create new one.
- os.system("rm -f %s" % back_disk_file )
- if not (os.path.exists(disk_file)):
- conf_file()
- else:
- move_file(disk_file, back_disk_file)
- conf_file()
-
- try :
- disk = get_or_bail(server, id=diskid, \
- pool_class=enumclass.Xen_DiskPool)
- mem = get_or_bail(server, id = memid, \
- pool_class=enumclass.Xen_MemoryPool)
- vir_network = net_list(server)
+ vir_network = net_list(server, virt)
if len(vir_network) > 0:
test_network = vir_network[0]
else:
@@ -192,68 +143,102 @@ def main():
net_xml, bridge = netxml(server, bridgename, test_network)
ret = create_vnet(server, net_xml)
if not ret:
- Globals.logger.error("Failed to create the Virtual Network '%s'", \
- test_network)
- return SKIP
+ logger.error("Failed to create Virtual Network '%s'",
+ test_network)
+ return SKIP, dpool, npool, mpool, ppool
+
netid = "%s/%s" % ("NetworkPool", test_network)
- net = get_or_bail(server, id = netid, \
- pool_class=enumclass.Xen_NetworkPool)
- proc = get_or_bail(server, id = procid, \
- pool_class=enumclass.Xen_ProcessorPool)
+ npool = get_pool_info(virt, server, netid, poolname= "NetworkPool")
except Exception, detail:
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore()
- status = FAIL
- return status
+ logger.error("Exception: %s", detail)
+ return FAIL, dpool, npool, mpool, ppool
- instlist, cllist, rtype, rangelist = init_list(disk, mem, net, proc )
+ return PASS, dpool, npool, mpool, ppool
+def verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist):
+ for inst in assoc_info:
+ if inst.classname != cllist[loop]:
+ print_field_error("Classname", inst.classname, cllist[loop])
+ return FAIL
+ if inst['ResourceType'] != rtype[cllist[loop]]:
+ print_field_error("ResourceType", inst['ResourceType'],
+ rtype[cllist[loop]])
+ return FAIL
+
+ # The following properties have been removed in the patchset 571
+ # but is present in the rpm libvirt-cim and hence retained it.
+
+ if CIM_REV < libvirtcim_rev:
+ ppolicy = inst['PropertyPolicy']
+ if ppolicy != 0 and ppolicy != 1:
+ print_field_error("PropertyPolicy", inst['PropertyPolicy'],
+ ppolicy)
+ return FAIL
+
+ vrole = inst['ValueRole']
+ if vrole < 0 or vrole > 4:
+ print_field_error("ValueRole", inst['ValueRole'], vrole)
+ return FAIL
+
+ insid = inst['InstanceID']
+ vrange = rangelist[insid]
+ if vrange != inst['ValueRange']:
+ print_field_error("ValueRange", inst['ValueRange'], vrange)
+ return FAIL
+
+ return PASS
+
+def verify_sdc_with_ac(virt, server, dpool, npool, mpool, ppool):
+ loop = 0
+ instlist, cllist, rtype, rangelist = init_list(virt, dpool, npool, mpool,
+ ppool)
+ assoc_cname = get_typed_class(virt, "SettingsDefineCapabilities")
+ cn = get_typed_class(virt, "AllocationCapabilities")
for instid in sorted(instlist):
try:
- assoc_info = assoc.Associators(options.ip, \
- "Xen_SettingsDefineCapabilities",
- cn,
- InstanceID = instid)
+ assoc_info = assoc.Associators(server, assoc_cname, cn,
+ InstanceID = instid, virt=virt)
if len(assoc_info) != 4:
- Globals.logger.error("Xen_SettingsDefineCapabilities returned \
-%i ResourcePool objects instead 4", len(assoc_info))
+ logger.error("%s returned %i ResourcePool objects"
+ "instead 4", assoc_cname, len(assoc_info))
status = FAIL
break
- for i in range(len(assoc_info)):
- if assoc_info[i].classname != cllist[loop]:
- print_error(i, "Classname", assoc_info, cllist[loop])
- status = FAIL
- if assoc_info[i]['ResourceType'] != rtype[cllist[loop]]:
- print_error(i, "ResourceType", assoc_info, rtype[cllist[loop]])
- status = FAIL
- ppolicy = assoc_info[i]['PropertyPolicy']
- if ppolicy != 0 and ppolicy != 1:
- print_error(i, "PropertyPolicy", assoc_info, ppolicy)
- status = FAIL
- vrole = assoc_info[i]['ValueRole']
- if vrole < 0 or vrole > 4:
- print_error(i, "ValueRole", assoc_info, vrole)
- status = FAIL
- insid = assoc_info[i]['InstanceID']
- vrange = rangelist[insid]
- if vrange != assoc_info[i]['ValueRange']:
- print_error(i, "ValueRange", assoc_info, vrange)
- status = FAIL
- if status != 0:
- break
- if status != 0:
+
+ status = verify_rasd_fields(loop, assoc_info, cllist, rtype,
+ rangelist)
+ if status != PASS:
break
else:
loop = loop + 1
+
except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
- 'Xen_SettingsDefineCapabilities')
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore()
+ logger.error(CIM_ERROR_ASSOCIATORS, assoc_cname)
+ logger.error("Exception: %s", detail)
status = FAIL
- clean_up_restore()
+ return status
+
+@do_main(platform_sup)
+def main():
+ options = main.options
+
+ server = options.ip
+ virt = options.virt
+
+ # Verify DiskPool on machine
+ status = create_diskpool_file()
+ if status != PASS:
+ return status
+
+ status, dpool, npool, mpool, ppool = get_pool_details(virt, server)
+ if status != PASS or dpool.InstanceID == None or mpool.InstanceID == None \
+ or npool.InstanceID == None or ppool.InstanceID == None:
+ cleanup_restore()
+ return FAIL
+
+ status = verify_sdc_with_ac(virt, server, dpool, npool, mpool, ppool)
+ cleanup_restore()
return status
if __name__ == "__main__":
16 years, 6 months