Kaitlin Rupert wrote:
Deepti B. Kalakeri wrote:
> # HG changeset patch
> # User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
> # Date 1208960298 -19800
> # Node ID 70e13e2a2abf57a666c8e4d5029d06ecd017ea69
> # Parent 0123ff5809dc099c0b7840fce65eca5bce0921e6
> [TEST] Making use of the lib fn conf_file(), cleanup_restore() and
> create_diskpool_file().
>
> Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
Looks good Deepti - just a few comments.
>
> diff -r 0123ff5809dc -r 70e13e2a2abf suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py
> global status
> specific_fields = { }
> if (len(inst_list)) != 1:
> - Globals.logger.error("Got %s record for Memory/Network/LogicalDisk
> instead of \
> -1", len(inst_list))
> + Globals.logger.error("Got %s record for Memory/Network/LogicalDisk \
> + instead of 1", len(inst_list))
Instead, you can do the following:
Globals.logger.error("Got %s record for Memory/Network/LogicalDisk"
" instead of 1", len(inst_list))
The block below has some strange indenting.
I applied the same patch that I submitted the other day.
I tried using hg export, and I don't see any problem with the indentation
in this patch :(
> try :
> - disk = get_or_bail(server, id=diskid, \
> -                    pool_class=enumclass.Xen_DiskPool)
> - mem = get_or_bail(server, id = memid, \
> -                   pool_class=enumclass.Xen_MemoryPool)
> + disk = get_or_bail(server, id=diskid,
> +                    pool_class=enumclass.Xen_DiskPool)
> + mem = get_or_bail(server, id = memid,
> +                   pool_class=enumclass.Xen_MemoryPool)
> netid = "%s/%s" % ("NetworkPool", virt_network)
> - net = get_or_bail(server, id = netid, \
> + net = get_or_bail(server, id = netid,
>                     pool_class=enumclass.Xen_NetworkPool)
> - proc = get_or_bail(server, id = procid, \
> + proc = get_or_bail(server, id = procid,
>                      pool_class=enumclass.Xen_ProcessorPool)
> except Exception, detail:
> Globals.logger.error("Exception: %s", detail)
> - clean_up_restore(server)
> + cleanup_restore()
> status = FAIL
> - ret = test_domain_function(test_dom, server, \
> -                            cmd = "destroy")
> + ret = test_domain_function(test_dom, server, cmd = "destroy")
> return status
>
> - pllist, cllist, prop_list, proc_prop = init_list(server, disk, mem, net, proc)
> + pllist, cllist, prop_list, proc_prop = init_list(server, disk, mem, net,
> +                                                  proc)
>
> # Looping through the pllist to get association for various pools.
> for cn, instid in sorted(pllist.items()):
> try:
> - assoc_info = assoc.Associators(server, \
> -                                "Xen_ElementAllocatedFromPool", \
> -                                cn, \
> -                                InstanceID = instid)
> + assoc_info = assoc.Associators(server,
> +                                "Xen_ElementAllocatedFromPool",
> +                                cn,
> +                                InstanceID = instid)
> # Verifying the Creation Class name for all the records returned for each
> # pool class queried
> inst_list = get_inst_for_dom(assoc_info)
> @@ -300,16 +265,16 @@ the specified domain: %s", test_dom)
> status = FAIL
> break
>
> - assoc_values(assoc_list=inst_list, field="CreationClassName", \
> - list=cllist, \
> - index=loop)
> + assoc_values(assoc_list=inst_list, field="CreationClassName", +
> list=cllist, + index=loop)
> # verifying the DeviceID
> if inst_list[0]['CreationClassName'] == 'Xen_Processor':
> # The DeviceID for the processor varies from 0 to (vcpu - 1 )
> list_index = 0
> - assoc_values(assoc_list=inst_list, field="DeviceID", \
> - list=proc_prop, \
> - index=list_index)
> + assoc_values(assoc_list=inst_list, field="DeviceID", +
> list=proc_prop, + index=list_index)
> else:
> # For LogicalDisk, Memory and NetworkPort
> if inst_list[0]['CreationClassName'] == 'Xen_LogicalDisk':
> @@ -318,40 +283,41 @@ the specified domain: %s", test_dom)
> list_index = 2
> else:
> list_index = 4 # NetworkPort
> - assoc_values(assoc_list=inst_list, field="DeviceID", \
> -              list=prop_list, \
> -              index=list_index)
> + assoc_values(assoc_list=inst_list, field="DeviceID",
> +              list=prop_list,
> +              index=list_index)
> if inst_list[0]['CreationClassName'] == 'Xen_LogicalDisk':
> # verifying the Name field for LogicalDisk
> - specific_fields = get_spec_fields_list(inst_list,field_name="Name")
> + specific_fields = get_spec_fields_list(inst_list, field_name="Name")
> list_index = 1
> elif inst_list[0]['CreationClassName'] == 'Xen_Memory':
> # verifying the NumberOfBlocks allocated for Memory
> - specific_fields = get_spec_fields_list(inst_list,field_name="NumberOfBlocks")
> + specific_fields = get_spec_fields_list(inst_list,
> +                                        field_name="NumberOfBlocks")
> list_index = 3
> else:
> # verifying the NetworkAddresses for the NetworkPort
> - specific_fields = get_spec_fields_list(inst_list,field_name="NetworkAddresses")
> + specific_fields = get_spec_fields_list(inst_list,
> +                                        field_name="NetworkAddresses")
> list_index = 5 # NetworkPort
> - assoc_values(assoc_list=inst_list, field="Other", \
> -              list=prop_list, \
> -              index=list_index, \
> -              specific_fields_list=specific_fields)
> + assoc_values(assoc_list=inst_list, field="Other",
> +              list=prop_list,
> +              index=list_index,
> +              specific_fields_list=specific_fields)
> if status != PASS:
> diff -r 0123ff5809dc -r 70e13e2a2abf suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py
> exp_list = [
> -     {'desc' : "No such instance (SystemName)", 'rc' : pywbem.CIM_ERR_NOT_FOUND}, \
> -     {'desc' : "No DeviceID specified", 'rc' : pywbem.CIM_ERR_FAILED}, \
> -     {'desc' : "No such instance", 'rc' : pywbem.CIM_ERR_NOT_FOUND}, \
> -     {'desc' : "One or more parameter values passed to the method were invalid", \
> -      'rc' : pywbem.CIM_ERR_INVALID_PARAMETER}, \
> -     {'desc' : "No such instance (CreationClassName)", 'rc' : pywbem.CIM_ERR_NOT_FOUND }, \
> -     {'desc' : "No such instance (SystemCreationClassName)", 'rc' : \
> -      pywbem.CIM_ERR_NOT_FOUND },
> +     {'desc' : "No such instance (SystemName)", 'rc' : pywbem.CIM_ERR_NOT_FOUND},
> +     {'desc' : "No DeviceID specified", 'rc' : pywbem.CIM_ERR_FAILED},
> +     {'desc' : "No such instance", 'rc' : pywbem.CIM_ERR_NOT_FOUND},
> +     {'desc' : "CIM_ERR_INVALID_PARAMETER",
> +      'rc' : pywbem.CIM_ERR_INVALID_PARAMETER},
> +     {'desc' : "No such instance (CreationClassName)",
> +      'rc' : pywbem.CIM_ERR_NOT_FOUND },
> +     {'desc' : "No such instance (SystemCreationClassName)",
> +      'rc' : pywbem.CIM_ERR_NOT_FOUND },
>       ]
These changes cause this test to fail on F9 with the release rpm. It would
be good to branch this test so that it passes with both the release rpm and
the current sources.
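One possible way to do that, as a rough sketch only: build exp_list from the
provider revision under test, so the newer "CIM_ERR_INVALID_PARAMETER" text
is expected only for current sources. The curr_revision parameter and the
CHANGESET_CUTOFF value below are hypothetical placeholders for however the
suite detects a release rpm versus current sources; only the pywbem error
constants are real.

import pywbem

CHANGESET_CUTOFF = 700   # hypothetical revision where the error text changed

def build_exp_list(curr_revision):
    # Older (release rpm) providers return the verbose description;
    # newer sources return the symbolic CIM error name instead.
    if curr_revision >= CHANGESET_CUTOFF:
        invalid_param_desc = "CIM_ERR_INVALID_PARAMETER"
    else:
        invalid_param_desc = "One or more parameter values passed to the " \
                             "method were invalid"

    return [
        {'desc' : "No such instance (SystemName)",
         'rc'   : pywbem.CIM_ERR_NOT_FOUND},
        {'desc' : "No DeviceID specified", 'rc' : pywbem.CIM_ERR_FAILED},
        {'desc' : "No such instance", 'rc' : pywbem.CIM_ERR_NOT_FOUND},
        {'desc' : invalid_param_desc,
         'rc'   : pywbem.CIM_ERR_INVALID_PARAMETER},
        {'desc' : "No such instance (CreationClassName)",
         'rc'   : pywbem.CIM_ERR_NOT_FOUND},
        {'desc' : "No such instance (SystemCreationClassName)",
         'rc'   : pywbem.CIM_ERR_NOT_FOUND},
    ]

The test could then call build_exp_list() once with whatever revision
information is available on the host before looping over the expected errors.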
>
> - return try_assoc(conn, exp_ret, test_dom_invalid, test_keys, \
> -                  test_vals, log_msg)
> + return try_assoc(conn, exp_ret, test_dom_invalid, test_keys, test_vals,
> +                  log_msg)
>