2to3 dict transform

Change-Id: I06e7019fbe8d1331751c6ec091817c24c4e1b590
(cherry picked from commit 28d7094f0e2d78c3397a5edf0fbe96085732bedf)
This commit is contained in:
Mark Charney
2017-06-10 20:15:27 -04:00
parent e8e1a25d6d
commit 4eccd7d1a4
31 changed files with 186 additions and 186 deletions

View File

@@ -177,14 +177,14 @@ class actions_codegen_t(object):
captured '''
tuple2actions = {}
rules = tuple2rule.values()
rules = list(tuple2rule.values())
self.rules = rules
self.all_fbs, self.common_fbs = self._gather_all_fb(rules)
self.max_nt_number, self.max_ntluf_number = self._get_max_nt_number(rules)
self.ret_action = False
self.has_emit = self._has_emit(rules)
for tuple, rule in tuple2rule.iteritems():
for tuple, rule in tuple2rule.items():
actions = self._create_fb_actions(self.all_fbs, self.common_fbs, rule)
nts = self._create_nt_actions(rule.actions)
ntlufs = self._create_ntluf_actions(rule.actions)
@@ -273,7 +273,7 @@ class actions_codegen_t(object):
f_call = "res=(*%s)(%s)" % (hash_val, self.strings_dict['obj_str'])
actions_list.append(validation + f_call)
nt = self.tuple2rule.values()[0].nt
nt = list(self.tuple2rule.values())[0].nt
obj_str = self.strings_dict['obj_str']
emit_call = "xed_encoder_request_iforms(%s)->x_%s=hidx+1"
actions_list.append(emit_call % (obj_str,nt))
@@ -294,7 +294,7 @@ class actions_codegen_t(object):
#dump the emit action
if self.has_emit:
nt = self.tuple2rule.values()[0].nt
nt = list(self.tuple2rule.values())[0].nt
obj_str = self.strings_dict['obj_str']
emit_call = "xed_encoder_request_iforms(%s)->x_%s=%s"
hash_entry = "%s[%s].emit" % (self.strings_dict['table_name'],

View File

@@ -44,11 +44,11 @@ def uniquify_list(l):
d = {}
for a in l:
d[a]=True
return d.keys()
return list(d.keys())
def expand_all_of_once(d):
found = False
for chip,ext_list in d.iteritems():
for chip,ext_list in d.items():
newexts = []
for ext in ext_list:
m = all_of_pattern.match(ext)
@@ -68,7 +68,7 @@ def expand_macro(d,expander):
found = expander(d)
def expand_macro_not(d):
for chip,ext_list in d.iteritems():
for chip,ext_list in d.items():
to_remove = []
positive_exts = []
for ext in ext_list:
@@ -172,7 +172,7 @@ def work(arg):
# the XED_ISA_SET_ enum
isa_set = set()
for vl in chip_features_dict.values():
for vl in list(chip_features_dict.values()):
for v in vl:
isa_set.add(v.upper())
isa_set = list(isa_set)

View File

@@ -92,14 +92,14 @@ def classify(kys,env):
env.funky.append(kys)
def dump_classifications(env):
for k in env.lengths.keys():
for k in list(env.lengths.keys()):
v = env.lengths[k]
print("LENGTH {} COUNT {}".format(k,v))
for lst in env.funky:
print(str(lst))
u = len(env.unique_sequences.keys())
u = len(list(env.unique_sequences.keys()))
print("TOTAL KEY SEQUENCES {}".format(env.all_keys))
print("UNIQUE KEY SEQUENCES {}".format(u))
print("")
@@ -130,7 +130,7 @@ def main(env):
u = {}
for k in kys_lst:
u[str(k)]=k
unique_keys = u.values()
unique_keys = list(u.values())
for k in unique_keys:
classify(k,env)

View File

@@ -883,7 +883,7 @@ class array_gen_t(object):
number of elements."""
expected_len = len(self.ranges)
for idict,value in self.values:
if len(idict.keys()) != expected_len:
if len(list(idict.keys())) != expected_len:
return False
return True
@@ -968,7 +968,7 @@ class array_gen_t(object):
for indices_dict,value in self.values:
if key in indices_dict:
present_values[indices_dict[key]] = True
return present_values.keys()
return list(present_values.keys())
def make_initialization_function(self, init_function_name,verbose=False):

View File

@@ -154,7 +154,7 @@ class constraint_vec_gen_t(object):
new_cdict.int2tuple = dict((i,t) for t,i in
new_cdict.tuple2int.iteritems())
new_cdict.tuple2int.items())
return new_cdict

View File

@@ -56,7 +56,7 @@ def work(args): # main function
for k,v in sorted( histo.items(), key=lambda t: t[1] ):
for k,v in sorted( list(histo.items()), key=lambda t: t[1] ):
print("{0:4d} {1}".format(v,k))
print("TOTAL: ", len(histo))

View File

@@ -390,7 +390,7 @@ class nonterminal_dict_t(object):
self.nonterminal_info = {}
def keys(self):
return self.nonterminal_info.keys()
return list(self.nonterminal_info.keys())
def add_graph_node(self, nt_name, node_id):
"""set the node id in the graph node"""
@@ -640,7 +640,7 @@ def compute_state_space(state_dict):
# a dictionary of the values of a each operand_decider
state_values = {}
for k in state_dict.keys():
for k in list(state_dict.keys()):
vals = state_dict[k]
for wrd in vals.list_of_str:
m = restriction_pattern.search(wrd)
@@ -1209,7 +1209,7 @@ class instruction_info_t(partitionable_info_t):
accept(r'[{]', lines)
reached_closing_bracket = False
# FIXME add more error checking
structured_input_dict = dict(zip(structured_input_tags.keys(),
structured_input_dict = dict(zip(list(structured_input_tags.keys()),
len(structured_input_tags)*[False]))
found_operands = False
filling_extra = False
@@ -1334,7 +1334,7 @@ class instruction_info_t(partitionable_info_t):
if reached_closing_bracket:
if found_operands == False:
die("Did not find operands for " + self.iclass)
for k in structured_input_dict.keys():
for k in list(structured_input_dict.keys()):
if structured_input_dict[k] == False:
if structured_input_tags[k]:
die("Required token missing: "+ k)
@@ -1419,7 +1419,7 @@ class instruction_info_t(partitionable_info_t):
return ''.join(s)
if self.prebindings:
s.append('prebindings: \n\t' +
'\n\t'.join(map(str,self.prebindings.values())) + '\n')
'\n\t'.join(map(str,list(self.prebindings.values()))) + '\n')
for op in self.operands:
s.append(pad)
s.append(" ")
@@ -1640,7 +1640,7 @@ def remove_overridden_versions(parser):
d[ii.iclass] = [ii]
iis = []
for ilist in d.values():
for ilist in list(d.values()):
iis.extend(ilist)
parser.instructions = iis
return parser
@@ -1920,7 +1920,7 @@ class graph_node(object):
found_value = False
found_other = False
value = None
for k,nxt in self.next.iteritems():
for k,nxt in self.next.items():
if k == 'other' and found_other==False:
found_other = True
elif found_value == False:
@@ -2066,7 +2066,7 @@ def partition_by_required_values(options, instructions, bitpos, token,
d[ other ].append((trimmed_vals,ii) )
#msge("RETURNING FROM PARTITION: %s" % ( str(d.keys())))
return (d, all_values.keys() )
return (d, list(all_values.keys()) )
def all_same_operand_decider(ilist,bitpos):
@@ -2575,7 +2575,7 @@ def build_sub_graph(common, graph, bitpos, skipped_bits):
need_to_splatter = False
previous_trimmed_values = None
scalar_values = set()
for k,partition in node_partition.iteritems():
for k,partition in node_partition.items():
if vpart():
msge("SPATTER SCAN: Operand decider partition key= " + str(k))
if isinstance(partition[0],types.TupleType):
@@ -2608,7 +2608,7 @@ def build_sub_graph(common, graph, bitpos, skipped_bits):
if need_to_splatter:
msge("Splattering because of conflicting 'other' conditions")
new_node_partition = {}
for k,partition in node_partition.iteritems():
for k,partition in node_partition.items():
if isinstance(partition[0],types.TupleType):
for trimmed_values, ii in partition:
for tv in trimmed_values:
@@ -2627,7 +2627,7 @@ def build_sub_graph(common, graph, bitpos, skipped_bits):
# set up the next nodes and give them their instructions.
for k,partition in node_partition.iteritems():
for k,partition in node_partition.items():
if vpart():
msge("PARTITIION: Operand decider partition key= " + str(k))
next_node = new_node(graph,k,bitpos)
@@ -2643,7 +2643,7 @@ def build_sub_graph(common, graph, bitpos, skipped_bits):
next_node.instructions.extend(partition)
# build the subgraphs for the children
for child in graph.next.itervalues():
for child in graph.next.values():
# RECUR for operand-decider
build_sub_graph(common, child, bitpos, 0)
return
@@ -2841,7 +2841,7 @@ def build_graph(common, parser_output, operand_storage_dict):
def print_graph(options, node, pad =''):
s = node.dump_str(pad)
msge(s)
for k,nxt in node.next.iteritems(): # PRINTING
for k,nxt in node.next.items(): # PRINTING
s = pad + ' key: ' + str(k)
msge(s)
print_graph(options, nxt, pad + ' ')
@@ -3006,7 +3006,7 @@ def collect_tree_depth(node, depths={}, depth=0):
except:
depths[cdepth] = 1
else:
for child in node.next.itervalues():
for child in node.next.values():
collect_tree_depth(child, depths, cdepth)
return depths
@@ -3017,7 +3017,7 @@ def collect_ifield(options, node, field, master_list):
s = getattr(ii,field)
if s not in master_list:
master_list.append(s)
for child in node.next.itervalues():
for child in node.next.values():
# FIXME: sloppy return value handling???
collect_ifield(options,child, field,master_list)
return master_list
@@ -3031,7 +3031,7 @@ def collect_ofield(options, node, field, master_list):
s = getattr(opnd,field)
if s != None and s not in master_list:
master_list[s] = True
for child in node.next.itervalues():
for child in node.next.values():
collect_ofield(options,child, field,master_list)
def collect_ofield_operand_type(options, node, field, master_list):
@@ -3043,7 +3043,7 @@ def collect_ofield_operand_type(options, node, field, master_list):
#s = getattr(opnd,field)
if s != None and s not in master_list:
master_list[s] = True
for child in node.next.itervalues():
for child in node.next.values():
collect_ofield_operand_type(options,child, field,master_list)
@@ -3056,7 +3056,7 @@ def collect_ofield_name_type(options, node, field, master_list):
type = getattr(opnd,'type')
if s not in master_list:
master_list[s]=type
for child in node.next.itervalues():
for child in node.next.values():
collect_ofield_name_type(options,child, field,master_list)
@@ -3081,7 +3081,7 @@ def collect_attributes(options, node, master_list):
master_list.append(x)
elif s != None and s not in master_list:
master_list.append(s)
for nxt in node.next.itervalues():
for nxt in node.next.values():
collect_attributes(options,nxt, master_list)
@@ -3098,7 +3098,7 @@ def write_instruction_data(odir,idata_dict):
open_mode = "a"
idata_files += 1
f = open(os.path.join(odir,fn),open_mode)
kys = idata_dict.keys()
kys = list(idata_dict.keys())
kys.sort()
s = "#%-19s %-15s %-15s %-30s %-20s %s\n" % ("iclass",
"extension",
@@ -3146,7 +3146,7 @@ def write_attributes_table(agi, odir):
if vattr():
msgb("Unique attributes", len(agi.attributes_dict))
t = []
for s,v in agi.attributes_dict.iteritems():
for s,v in agi.attributes_dict.items():
t.append((v,s))
t.sort(cmp=attr_dict_cmp)
if vattr():
@@ -3315,7 +3315,7 @@ def repmap_emit_code(agi, plist, kind, hash_fn):
array_limit = 2*(mx+1) # make room for input key validation
fo.add_code('const xed_uint16_t lu_table[{}] = {{'.format(array_limit))
hashes = t.keys()
hashes = list(t.keys())
hashes.sort()
# fill in the rows of the array
@@ -3495,12 +3495,12 @@ def emit_enum_info(agi):
graph."""
msge('emit_enum_info')
# make everything uppercase
nonterminals = [ s.upper() for s in agi.nonterminal_dict.keys()]
operand_types = [ s.upper() for s in agi.operand_types.keys()]
operand_widths = [ s.upper() for s in agi.operand_widths.keys()]
nonterminals = [ s.upper() for s in list(agi.nonterminal_dict.keys())]
operand_types = [ s.upper() for s in list(agi.operand_types.keys())]
operand_widths = [ s.upper() for s in list(agi.operand_widths.keys())]
operand_names = [ s.upper() for s in
agi.operand_storage.get_operands().keys() ]
list(agi.operand_storage.get_operands().keys()) ]
msge("OPERAND-NAMES " + " ".join(operand_names))
@@ -3535,7 +3535,7 @@ def emit_enum_info(agi):
#nt_enum_numeric_value -> nt_name
xed3_nt_enum_val_map = {}
upper_dict = {}
for nt_name in agi.nonterminal_dict.keys():
for nt_name in list(agi.nonterminal_dict.keys()):
nt_name_upper = nt_name.upper()
upper_dict[nt_name_upper] = nt_name
for i,upper_nt in enumerate(nonterminals):
@@ -3740,11 +3740,11 @@ def compute_iforms(options, gi, operand_storage_dict):
# printing various ways
if viform():
for iform,iilist in iforms.iteritems():
for iform,iilist in iforms.items():
msge("IFORM %s: %s" % (iform,
" ".join([x.iclass for x in iilist] )))
for iclass,iformlist in ii_iforms.iteritems():
for iclass,iformlist in ii_iforms.items():
str_iforms = {}
dups = []
for iform in iformlist:
@@ -3754,7 +3754,7 @@ def compute_iforms(options, gi, operand_storage_dict):
str_iforms[iform]=True
msge("II_IFORM %s: %s" % (iclass, " ".join(str_iforms.keys())))
msge("II_IFORM %s: %s" % (iclass, " ".join(list(str_iforms.keys()))))
if len(dups)!=0:
msge("\tDUPS: %s: %s" % (iclass," ".join(dups)))
@@ -4019,7 +4019,7 @@ def find_common_operand_sequences(agi):
msgb("Unique Operand Sequences", str(next_oid_seqeuence))
n = 0
for k in global_oid_sequences.keys():
for k in list(global_oid_sequences.keys()):
n = n + len(k.lst)
global_max_operand_sequences = n
msgb("Number of required operand sequence pointers",
@@ -4340,15 +4340,15 @@ def compress_iform_strings(values):
len(bases),
len(operand_sigs)))
if len(h) != (max( [ int(x) for x in h.keys()] )+1):
if len(h) != (max( [ int(x) for x in list(h.keys())] )+1):
print("PROBLEM IN h LENGTH")
    # make a numerically indexed version of the bases table
bi = {}
for k,v in bases.iteritems():
for k,v in bases.items():
bi[v] = k
    # make a numerically indexed version of the operand_sig table
oi = {}
for k,v in operand_sigs.iteritems():
for k,v in operand_sigs.items():
oi[v] = k
f = sys.stdout
@@ -4425,7 +4425,7 @@ def collect_and_emit_iforms(agi,options):
# number them from zero, per iclass
vtuples = [('INVALID', 0, 'INVALID') ]
imax = {} # maximum number of iforms per iclass
for ic,ol in iform_dict.iteritems():
for ic,ol in iform_dict.items():
ol = uniqueify(ol)
sz= len(ol)
vsub = zip([ic.upper()]*sz, # the iclass
@@ -4564,7 +4564,7 @@ def renumber_nodes_sub(options,node):
#msge("RENUMBER NODE %d becomes %d" % ( node.id, renum_node_id))
node.id = renum_node_id
# recur
for nxt in node.next.itervalues():
for nxt in node.next.values():
node_id = renumber_nodes_sub(options,nxt)
@@ -4579,8 +4579,8 @@ def merge_child_nodes(options,node):
# bit_pos* becomes a bigger range
# more "next" nodes.
tnode = {}
for k,child in node.next.iteritems(): # children # MERGING
for j in child.next.keys(): # grandchildren
for k,child in node.next.items(): # children # MERGING
for j in list(child.next.keys()): # grandchildren
bigkey = str(k) + str(j)
if vmerge():
msge("Bigkey= %s" % (bigkey))
@@ -4610,12 +4610,12 @@ def merge_nodes(options,node):
while merging:
all_match = True
decider_bits = [ node.next[k].decider_bits for k in
node.next.keys() ]
list(node.next.keys()) ]
if not all_the_same(decider_bits):
if vmerge():
msge("Not merging because unequal numbers of decider" +
" bits follow:" + str(decider_bits))
for nxt in node.next.itervalues():
for nxt in node.next.values():
msge("\tChildNode:\n" +nxt.dump_str('\t\t'))
all_match = False
break
@@ -4651,7 +4651,7 @@ def merge_nodes(options,node):
# look at all the next nodes
for child in node.next.itervalues():
for child in node.next.values():
if child.back_split_pos != None:
if vmerge():
msge("Not merging because a child is back-split")
@@ -4684,7 +4684,7 @@ def merge_nodes(options,node):
merging = False
# recur
for child in node.next.itervalues():
for child in node.next.values():
merge_nodes(options,child)
def optimize_graph(options, node):
@@ -4702,7 +4702,7 @@ def optimize_graph(options, node):
def epsilon_label_graph(options, node):
node.otherwise_ok = True
# recur
for child in node.next.itervalues():
for child in node.next.values():
epsilon_label_graph(options,child)
############################################################################
@@ -4810,7 +4810,7 @@ def print_bit_groups(bit_groups, s=''):
def emit_function_headers(fp, fo_dict):
"""For each function in the fo_dict dictionary, emit the function
prototype to the fp file emitter object."""
for fname in fo_dict.keys():
for fname in list(fo_dict.keys()):
fo = fo_dict[fname]
fp.write(fo.emit_header())
@@ -5326,7 +5326,7 @@ class all_generator_info_t(object):
def extend_operand_names_with_input_states(self):
type ='xed_uint32_t'
for operand_decider in self.common.state_space.keys():
for operand_decider in list(self.common.state_space.keys()):
#msge("STATESPACE: considering " + operand_decider)
if operand_decider not in self.operand_names:
self.operand_names[operand_decider] = type
@@ -5730,7 +5730,7 @@ def make_cpuid_mappings(agi,mappings):
# collect all unique list of cpuid bit names
cpuid_bits = {}
for vlist in mappings.itervalues():
for vlist in mappings.values():
for bit in vlist:
if bit == 'N/A':
data = bitname = 'INVALID'
@@ -5786,7 +5786,7 @@ def make_cpuid_mappings(agi,mappings):
# check that each isa set in the cpuid files has a corresponding XED_ISA_SET_ value
fail = False
for cisa in mappings.keys():
for cisa in list(mappings.keys()):
t = re.sub('XED_ISA_SET_','',cisa)
if t not in agi.all_enums['xed_isa_set_enum_t']:
fail = True
@@ -5878,7 +5878,7 @@ def emit_reg_class_enum(options, regs_list):
rclasses[fine_rclass]=True
del rclasses['INVALID']
just_rclass_names = rclasses.keys()
just_rclass_names = list(rclasses.keys())
# FIXME: would really prefer alphanumeric sort (low priority)
just_rclass_names.sort()
@@ -6268,7 +6268,7 @@ def decorate_instructions_with_exception_types(agi):
def emit_ctypes_enum(options, ctypes_dict):
ctypes_dict['INVALID']=True
type_names = ctypes_dict.keys()
type_names = list(ctypes_dict.keys())
type_names.sort(cmp=cmp_invalid)
ctypes_enum = enum_txt_writer.enum_info_t(type_names,
options.xeddir, options.gendir,
@@ -6301,12 +6301,12 @@ def emit_ctypes_mapping(options, operand_ctype_map, operand_bits_map):
ifo = function_object_t('xed_init_operand_ctypes', 'void')
for o,c in operand_ctype_map.iteritems():
for o,c in operand_ctype_map.items():
ifo.add_code_eol(
"xed_operand_ctype[XED_OPERAND_%s]=XED_OPERAND_CTYPE_%s" % (
o.upper(),c.upper()))
for o,c in operand_bits_map.iteritems():
for o,c in operand_bits_map.items():
ifo.add_code_eol("xed_operand_bits[XED_OPERAND_%s]=%s" % (o.upper(), c))
cf.write("static xed_operand_ctype_enum_t"+
@@ -6333,13 +6333,13 @@ def gen_operand_storage_fields(options,agi):
operand_fields = agi.operand_storage.get_operands()
ctypes = {} # ctypes -> True
for of in operand_fields.values():
for of in list(operand_fields.values()):
ctypes[of.ctype]=True
operand_ctype_map = {}
operand_bits_map = {}
for of in operand_fields.itervalues():
for of in operand_fields.values():
operand_ctype_map[of.name] = of.ctype
operand_bits_map[of.name] = of.bitwidth

View File

@@ -185,7 +185,7 @@ def flatten_dict(dict_with_lists):
"""Take a dict with some possible sublists, and return a list of
dicts where no rhs is a list. All possible combinations"""
retval = []
kys = dict_with_lists.keys()
kys = list(dict_with_lists.keys())
flatten_dict_sub(retval, {}, dict_with_lists,kys)
return retval
@@ -421,7 +421,7 @@ def uniqueify(values):
s = {}
for a in values:
s[a] = True
k = s.keys()
k = list(s.keys())
k.sort()
return k

View File

@@ -52,7 +52,7 @@ def test_hlist():
d[a] = 1
d[b] = 2
d[c] = 3
for k in d.keys():
for k in list(d.keys()):
print(str(k))
if __name__ == '__main__':

View File

@@ -87,7 +87,7 @@ def _is_amd3dnow(agi):
#mostly modrm-related
def _get_nested_nts(agi):
nested_nts = set()
for nt_name in agi.nonterminal_dict.keys():
for nt_name in list(agi.nonterminal_dict.keys()):
g = agi.generator_dict[nt_name]
ii = g.parser_output.instructions[0]
if genutil.field_check(ii,'iclass'):
@@ -164,7 +164,7 @@ def gen_xed3(agi,ild_info,is_3dnow,ild_patterns,
ild_codegen.dump_vv_map_lookup(agi,
vv_lu,
is_3dnow,
op_lu_map.values(),
list(op_lu_map.values()),
h_fn='xed3-phash.h')
#xed3_nt.work generates all the functions and lookup tables for
#dynamic decoding
@@ -220,14 +220,14 @@ def work(agi):
#Get dictionary with all legal values for all interesting operands
all_state_space = ild_cdict.get_all_constraints_state_space(agi)
_msg("ALL_STATE_SPACE:")
for k,v in all_state_space.items():
for k,v in list(all_state_space.items()):
_msg("%s: %s"% (k,v))
#Get widths for the operands
all_ops_widths = ild_cdict.get_state_op_widths(agi, all_state_space)
_msg("ALL_OPS_WIDTHS:")
for k,v in all_ops_widths.items():
for k,v in list(all_ops_widths.items()):
_msg("%s: %s"% (k,v))
#generate a list of pattern_t objects that describes the ISA.

View File

@@ -63,7 +63,7 @@ def _set_state_space_from_ii(agi, ii, state_space):
#look at prebindings too
#for things like ZEROING that don't have all possible
#values mentioned in patterns
for (name, binding) in ii.prebindings.items():
for (name, binding) in list(ii.prebindings.items()):
bitnum = len(binding.bit_info_list)
#dirty hack: we don't want big prebindings to explode
@@ -152,7 +152,7 @@ def get_state_op_widths(agi, state_space):
Returns a dictionary from operand name to operands bit width
"""
widths_dict = {}
for opname,val_dict in state_space.items():
for opname,val_dict in list(state_space.items()):
if opname in agi.operand_storage.get_operands():
opnd = agi.operand_storage.get_operand(opname)
widths_dict[opname] = int(opnd.bitwidth)
@@ -387,14 +387,14 @@ def _get_united_cdict(ptrn_list, state_space, vexvalid, all_ops_widths):
#take only requested space patterns
ptrns = []
for ptrn in ptrn_list:
if vexvalid in ptrn.constraints['VEXVALID'].keys():
if vexvalid in list(ptrn.constraints['VEXVALID'].keys()):
ptrns.append(ptrn)
if len(ptrns) == 0:
return None
for ptrn in ptrns:
cnames.extend(ptrn.constraints.keys())
cnames.extend(list(ptrn.constraints.keys()))
cnames = set(cnames)
cdicts = []
@@ -515,7 +515,7 @@ class constraint_dict_t(object):
return dict_list[0]
res = constraint_dict_t(cnames=cnstr_names)
for cdict in dict_list:
for key in cdict.tuple2rule.keys():
for key in list(cdict.tuple2rule.keys()):
if key in res.tuple2rule:
msg = []
msg.append("key: %s" % (key,))
@@ -545,20 +545,20 @@ class constraint_dict_t(object):
return self.make_cdict(cnames[1:], tuple2rule)
else:
new_tuple2rule = {}
for key_tuple in tuple2rule.keys():
for key_tuple in list(tuple2rule.keys()):
for val in vals:
new_key = key_tuple + (val,)
new_tuple2rule[new_key] = self.rule
return self.make_cdict(cnames[1:], new_tuple2rule)
def get_all_keys_by_val(self, val):
return [k for k,v in self.tuple2rule.iteritems() if v == val]
return [k for k,v in self.tuple2rule.items() if v == val]
def create_tuple2int(self, all_ops_widths):
''' create the mapping of tuple to its int value '''
tuple2int = {}
int2tuple = {}
for t in self.tuple2rule.iterkeys():
for t in self.tuple2rule.keys():
res = tup2int.tuple2int(t, self.cnames, all_ops_widths)
if res in int2tuple:
err = "the tuple % and the tuple %s generate the same value:%d"
@@ -590,7 +590,7 @@ class constraint_dict_t(object):
new_cdict.int2tuple = dict((i,t) for t,i in
new_cdict.tuple2int.iteritems())
new_cdict.tuple2int.items())
return new_cdict
@@ -598,8 +598,8 @@ class constraint_dict_t(object):
''' return a tuple of the operand accessor function and the constraint
names that it represents '''
ptrn_list = self.tuple2rule.values()
if cname in _token_2_module.keys():
ptrn_list = list(self.tuple2rule.values())
if cname in list(_token_2_module.keys()):
nt_module = _token_2_module[cname]
getter_fn = nt_module.get_getter_fn(ptrn_list)
if not getter_fn:
@@ -734,5 +734,5 @@ def gen_ph_fos(agi, cdict_by_map_opcode, is_amd, log_fn,
for key in sorted(stats.keys()):
_log(log_f,"%s %s\n" % (key,stats[key]))
log_f.close()
return phash_lu,lu_fo_list,op_lu_map.values()
return phash_lu,lu_fo_list,list(op_lu_map.values())

View File

@@ -190,14 +190,14 @@ def gen_l2_func_list(agi, target_nt_dict, arg_nt_dict,
ild_t_member):
"""generate L2 functions"""
l2_func_list = []
for (nt_name,array) in target_nt_dict.iteritems():
for (nt_name,array) in target_nt_dict.items():
target_opname = array.get_target_opname()
if array.is_const_lookup_fun():
fo = gen_const_l2_function(agi, nt_name,
target_opname, ild_t_member)
l2_func_list.append(fo)
else:
for arg_nt_seq,arg_arr in arg_nt_dict.iteritems():
for arg_nt_seq,arg_arr in arg_nt_dict.items():
fo = gen_scalable_l2_function(agi, nt_name,
target_opname, ild_t_member, arg_arr, list(arg_nt_seq))
l2_func_list.append(fo)
@@ -231,7 +231,7 @@ _ordered_maps = ['']
def _test_map_all_zero(vv,phash_map_lu):
"""phash_map_lu is a dict[maps][0...255] pointing to a 2nd level lookup """
all_zero_map = {}
for xmap in phash_map_lu.keys():
for xmap in list(phash_map_lu.keys()):
omap = phash_map_lu[xmap]
all_zero=True
for i in range(0,256):
@@ -324,7 +324,7 @@ def dump_vv_map_lookup(agi,
maps = ild_info.get_maps(is_3dnow)
vv_num = [ int(x) for x in vv_lu.keys()]
vv_num = [ int(x) for x in list(vv_lu.keys())]
vv_index = max(vv_num) + 1
map_num = len(maps)
arr_name = 'xed3_phash_lu'
@@ -490,7 +490,7 @@ def gen_l1_byreg_resolution_function(agi,info_list, nt_dict, is_conflict_fun,
#if not all modrm.reg values have legal instructions defined, we don't
#have full 0-7 dict for modrm.reg here, and we can't generate the interval
#dict
if len(fun_dict.keys()) == 8:
if len(list(fun_dict.keys())) == 8:
int_dict = _gen_intervals_dict(fun_dict)
else:
int_dict = None
@@ -529,7 +529,7 @@ def gen_l1_byreg_resolution_function(agi,info_list, nt_dict, is_conflict_fun,
def _add_int_dict_dispatching(fo, int_dict, dispatch_var, data_name):
cond_starter = 'if'
for interval in int_dict.keys():
for interval in list(int_dict.keys()):
min = interval[0]
max = interval[-1]
#avoid comparing unsigned int to 0, this leads to build errors
@@ -549,7 +549,7 @@ def _add_int_dict_dispatching(fo, int_dict, dispatch_var, data_name):
def _add_switch_dispatching(fo, fun_dict, dispatch_var, data_name):
fo.add_code("switch(%s) {" % dispatch_var)
for key in fun_dict.keys():
for key in list(fun_dict.keys()):
fo.add_code('case %s:' % key)
call_stmt = '%s(%s)' % (fun_dict[key], data_name)
fo.add_code_eol(call_stmt)
@@ -584,7 +584,7 @@ def gen_l1_bymode_resolution_function(agi,info_list, nt_dict, is_conflict_fun,
#if not all mode values have legal instructions defined, we don't
#have a full mode-value dict here, and we can't generate the interval
#dict
if len(fun_dict.keys()) == len(ildutil.mode_space):
if len(list(fun_dict.keys())) == len(ildutil.mode_space):
int_dict = _gen_intervals_dict(fun_dict)
else:
int_dict = None

View File

@@ -257,7 +257,7 @@ def gen_l1_functions_and_lookup(agi, united_lookup, disp_dict):
#FIXME: the bucketed function name is not self descriptive
bucket_name = 'xed_lookup_function_DISP_BUCKET_%s_l1'
cur_bucket = 0
for res_fun_list in l1_bucket_dict.values():
for res_fun_list in list(l1_bucket_dict.values()):
if len(res_fun_list) == 1:
#only one such function - we should define it as is
l1_resolution_fos.append(res_fun_list[0])
@@ -272,8 +272,8 @@ def gen_l1_functions_and_lookup(agi, united_lookup, disp_dict):
#fix references in the lookup table
for res_fun in res_fun_list:
for insn_map in l1_lookup.keys():
for opcode in l1_lookup[insn_map].keys():
for insn_map in list(l1_lookup.keys()):
for opcode in list(l1_lookup[insn_map].keys()):
cur_fn = l1_lookup[insn_map][opcode]
if cur_fn == res_fun.function_name:
l1_lookup[insn_map][opcode] = cur_buck_name
@@ -359,7 +359,7 @@ def work(agi, united_lookup, disp_nts, brdisp_nts, ild_gendir,
disp_dict = _gen_l3_array_dict(agi, disp_nts, _disp_token)
nt_arr_list = brdisp_dict.values() + disp_dict.values()
nt_arr_list = list(brdisp_dict.values()) + list(disp_dict.values())
#create function that calls all initialization functions
init_f = ild_nt.gen_init_function(nt_arr_list, 'xed_ild_disp_l3_init')
@@ -376,7 +376,7 @@ def work(agi, united_lookup, disp_nts, brdisp_nts, ild_gendir,
l2_functions = []
eosz_op = ild_eosz.get_target_opname()
easz_op = ild_easz.get_target_opname()
for nt_name,array in disp_dict.items() + brdisp_dict.items():
for nt_name,array in list(disp_dict.items()) + list(brdisp_dict.items()):
#Some DISP NTs depend on EOSZ, others on EASZ, we need to know
#that when we generate L2 functions
if eosz_op in array.get_arg_names():

View File

@@ -43,7 +43,7 @@ _easz_defines = {
_easz_defines[_easz_lookup_def_str] = len(_easz_defines)
#reverted _eosz_defines
_easz_defines_reverse = dict((v,k) for k, v in _easz_defines.iteritems())
_easz_defines_reverse = dict((v,k) for k, v in _easz_defines.items())
_easz_c_fn = 'xed-ild-easz.c'
_easz_header_fn = 'xed-ild-easz.h'
@@ -164,13 +164,13 @@ def work(agi, united_lookup, easz_nts, ild_gendir, debug):
return
nt_seq_arrays[tuple(nt_seq)] = array
#init function calls all single init functions for the created tables
init_f = ild_nt.gen_init_function(nt_seq_arrays.values(),
init_f = ild_nt.gen_init_function(list(nt_seq_arrays.values()),
'xed_ild_easz_init')
ild_nt.dump_lu_arrays(agi, nt_seq_arrays.values(), _easz_c_fn,
ild_nt.dump_lu_arrays(agi, list(nt_seq_arrays.values()), _easz_c_fn,
mbuild.join('include-private', _easz_header_fn),
init_f)
getter_fos = []
for names in nt_seq_arrays.keys():
for names in list(nt_seq_arrays.keys()):
arr = nt_seq_arrays[names]
getter_fo = ild_codegen.gen_derived_operand_getter(agi, _easz_token,
arr, list(names))

View File

@@ -203,17 +203,17 @@ def work(agi, united_lookup, eosz_nts, ild_gendir, debug):
return None
nt_seq_arrays[tuple(nt_seq)] = array
#init function calls all single init functions for the created tables
init_f = ild_nt.gen_init_function(nt_seq_arrays.values(),
init_f = ild_nt.gen_init_function(list(nt_seq_arrays.values()),
'xed_ild_eosz_init')
#dump init and lookup functions for EOSZ sequences
ild_nt.dump_lu_arrays(agi, nt_seq_arrays.values(), _eosz_c_fn,
ild_nt.dump_lu_arrays(agi, list(nt_seq_arrays.values()), _eosz_c_fn,
mbuild.join('include-private', _eosz_header_fn),
init_f)
#generate EOSZ getter functions - they get xed_decoded_inst_t*
#and return EOSZ value (corresponding to EOSZ NT sequence
#that they represent)
getter_fos = []
for names in nt_seq_arrays.keys():
for names in list(nt_seq_arrays.keys()):
arr = nt_seq_arrays[names]
getter_fo = ild_codegen.gen_derived_operand_getter(agi, _eosz_token,
arr, list(names))

View File

@@ -326,11 +326,11 @@ def work(agi, united_lookup, imm_nts, ild_gendir, eosz_dict,
nt_dict[nt_name] = array
#create function that calls all initialization functions for L3
init_f = ild_nt.gen_init_function(nt_dict.values(),
init_f = ild_nt.gen_init_function(list(nt_dict.values()),
'xed_ild_imm_l3_init')
#dump L3 functions
ild_nt.dump_lu_arrays(agi, nt_dict.values(), _l3_c_fn,
ild_nt.dump_lu_arrays(agi, list(nt_dict.values()), _l3_c_fn,
mbuild.join('include-private',_l3_header_fn),
init_f)

View File

@@ -48,7 +48,7 @@ def get_setting_nts(agi, opname):
"""
state_dict = agi.common.state_bits
nt_set = set()
for nt_name in agi.nonterminal_dict.keys():
for nt_name in list(agi.nonterminal_dict.keys()):
gi = agi.generator_dict[nt_name]
parser = gi.parser_output
for rule in parser.instructions:
@@ -290,8 +290,8 @@ def gen_lookup_array(agi, nt_seq, val_dict, opname, argnames,
(_first_indices, value) = val_dict[0]
val_dict = [([{}], value)]
for od in argnames.keys():
values = argnames[od].keys()
for od in list(argnames.keys()):
values = list(argnames[od].keys())
array.add_dimension(operand_storage.get_ctype(od),
min(values),
max(values) + 1,
@@ -404,7 +404,7 @@ def gen_nt_lookup(agi, nt_name, target_op, target_type=None, level=''):
#e.g for EOSZ base_row dict must have OSZ,MOD,REXW operands as keys
def row_match(base_row, row):
#ildutil.ild_err("ILD_DEBUG BASE ROW %s" % (base_row,))
for (op, val) in row.items():
for (op, val) in list(row.items()):
if op in base_row:
if base_row[op] != val:
return False
@@ -521,9 +521,9 @@ def _generate_lookup_function_indices(ii,state_space,argnames):
#know which value to choose.
#of course there are other ways to solve this problem, but this seems to be
#the easiest.
for bt_token in argnames.keys():
for bt_token in list(argnames.keys()):
if not (bt_token in indices):
indices[bt_token] = argnames[bt_token].keys()
indices[bt_token] = list(argnames[bt_token].keys())
### NOW, we must flatten any list-valued RHS's & return a list of

View File

@@ -250,7 +250,7 @@ class phash_t(object):
lines.append('m=%d' % self.hash_f.get_table_size())
lines.append('%s' % self.hash_f)
lines.append('tuple x -> value')
for tuple_val in self.tuple_dict.keys():
for tuple_val in list(self.tuple_dict.keys()):
x = self.t2x[tuple_val]
value = self.tuple_dict[tuple_val]
line = '%s %s -> %s' % (tuple_val,x, str(value))
@@ -278,9 +278,9 @@ class phash_t(object):
class l1_phash_t(phash_t):
def __init__(self, cdict, hash_f):
phash_t.__init__(self, cdict, hash_f)
for t,x in cdict.tuple2int.iteritems():
for t,x in cdict.tuple2int.items():
hash_val = self.hash_f.apply(x)
if hash_val in self.x2hx.values():
if hash_val in list(self.x2hx.values()):
msg = "l1_phash_t: %s\n function is not perfect!\n"
msg += 'hashval=%d , x2hx: %s' % (hash_val, self.x2hx)
ildutil.ild_err(msg)
@@ -319,7 +319,7 @@ class l2_phash_t(phash_t):
phash_t.__init__(self, cdict, hash_f)
hx2tuples = collections.defaultdict(list)
for t,x in self.cdict.tuple2int.iteritems():
for t,x in self.cdict.tuple2int.items():
hx = self.hash_f.apply(x)
if len(hx2tuples[hx]) >= _l1_bucket_max:
msg = "l2_phash_t: function does not distribute well!\n"
@@ -330,13 +330,13 @@ class l2_phash_t(phash_t):
self.hx2x[hx] = x
self.hx2phash = {}
for hx,tuples in hx2tuples.iteritems():
for hx,tuples in hx2tuples.items():
new_cdict = self.cdict.filter_tuples(tuples)
# try (1)linear, then (2)hashmul then (3) fks for the 2nd
# level of hash function.
phash = None
if _is_linear(new_cdict.int2tuple.keys()):
if _is_linear(list(new_cdict.int2tuple.keys())):
phash = _get_linear_hash_function(new_cdict)
if not phash:
phash = _find_l1_phash_mul(new_cdict)
@@ -347,7 +347,7 @@ class l2_phash_t(phash_t):
self.hx2phash[hx] = phash
else:
lines = []
for k,v in new_cdict.tuple2rule.items():
for k,v in list(new_cdict.tuple2rule.items()):
lines.append('%s -> %s'% ((k,), v))
str = '\n'.join(lines)
ildutil.ild_err("Failed to find l1 phash for dict %s" %
@@ -379,7 +379,7 @@ class l2_phash_t(phash_t):
elems = []
#invert the x2hx mapping
hx2x = dict((hx,x) for x,hx in self.x2hx.iteritems())
hx2x = dict((hx,x) for x,hx in self.x2hx.items())
for hx in range(0, self.hash_f.get_table_size()):
if hx in hx2fo:
@@ -412,7 +412,7 @@ class l2_phash_t(phash_t):
obj_type = self.cdict.strings_dict['obj_type']
const = self.cdict.strings_dict['obj_const']
hx2fo = {}
for hx,phash in self.hx2phash.items():
for hx,phash in list(self.hx2phash.items()):
fid = '%s_%d_l1' % (fname, hx)
(hx2fo_list,operand_lu_fo) = phash.gen_find_fos(fid)
hx2fo[hx] = hx2fo_list[0]
@@ -435,14 +435,14 @@ class l2_phash_t(phash_t):
lu_fname = operand_lu_fo.function_name
self.add_op_lu_function(fo, lu_fname)
self.add_find_lines(fo)
fos = hx2fo.values()
fos = list(hx2fo.values())
fos.append(fo)
#all the operand_lu_fo going to be the same so we just take the last one
return fos,operand_lu_fo
def get_size(self):
size = self.hash_f.get_table_size()
for phash in self.hx2phash.values():
for phash in list(self.hx2phash.values()):
size += phash.get_size()
return size
@@ -450,7 +450,7 @@ class l2_phash_t(phash_t):
lines = ['-----------2-LEVEL-PHASH-------------']
lines.append('m=%d' % self.hash_f.get_table_size())
lines.append('%s' % self.hash_f)
for tuple_val in self.cdict.tuple2rule.keys():
for tuple_val in list(self.cdict.tuple2rule.keys()):
lines.append('-------------------------------------')
lines.append('tuple x h(x) -> l1_phash')
x = self.cdict.tuple2int[tuple_val]
@@ -475,14 +475,14 @@ def _is_linear(keys):
def _get_linear_hash_function(cdict):
''' returns phash_t object with a linear_funct_t as the hash function'''
keylist = cdict.int2tuple.keys()
keylist = list(cdict.int2tuple.keys())
hash_f = hashlin.get_linear_hash_function(keylist)
return l1_phash_t(cdict, hash_f)
def _find_l1_phash_fks(cdict):
hashfn = hashfks.find_fks_perfect(cdict.tuple2int.values())
hashfn = hashfks.find_fks_perfect(list(cdict.tuple2int.values()))
if hashfn:
return l1_phash_t(cdict, hashfn)
return None
@@ -506,7 +506,7 @@ def _find_l1_phash_mul(cdict):
candidate_lengths = _find_candidate_lengths_mul(cdict.tuple2int)
for p in candidate_lengths:
hash_f = hashmul.hashmul_t(p)
if hash_f.is_perfect(cdict.tuple2int.itervalues()):
if hash_f.is_perfect(iter(cdict.tuple2int.values())):
return l1_phash_t(cdict, hash_f)
del hash_f
return None
@@ -549,7 +549,7 @@ def _gen_hash_one_level(cdict):
"""Generate a 1 level hash function or give up"""
# linear means all keys are sequential. not required to be zero-based.
if _is_linear(cdict.int2tuple.keys()):
if _is_linear(list(cdict.int2tuple.keys())):
return _get_linear_hash_function(cdict)
phash = _find_l1_phash_mul(cdict)

View File

@@ -54,10 +54,10 @@ class ild_storage_t(object):
def get_all_infos(self):
all_infos = []
for opcode_dict in self.lookup.itervalues():
for info_list in opcode_dict.itervalues():
for opcode_dict in self.lookup.values():
for info_list in opcode_dict.values():
all_infos.extend(info_list)
return all_infos
def get_maps(self):
return self.lookup.keys()
return list(self.lookup.keys())

View File

@@ -30,7 +30,7 @@ def _get_modules(fn):
finder = modulefinder.ModuleFinder()
finder.run_script(fn)
all = []
for m in finder.modules.itervalues():
for m in finder.modules.values():
if not isinstance(m, modulefinder.Module):
continue
if not m.__file__:

View File

@@ -142,7 +142,7 @@ class instructions_group_t(object):
groups = []
#1. generate the groups
for iclass,iforms in iarray.items():
for iclass,iforms in list(iarray.items()):
iforms.sort(cmp=cmp_iforms_by_bind_ptrn)
self._put_iclass_in_group(groups,iclass,iforms)
@@ -213,7 +213,7 @@ class ins_group_t(object):
def get_iclasses(self):
''' return a list of iclasses in the group'''
return self.iclass2iforms.keys()
return list(self.iclass2iforms.keys())
def get_iform_ids_table(self):
''' generate C style table of iform Id's.

View File

@@ -240,9 +240,9 @@ class nt_function_gen_t(object):
nt.default_action = actions.gen_return_action('')
def gen_nt_functions(self):
nonterminals = (self.nonterminals.values() +
self.decoder_nonterminals.values() +
self.decoder_ntlufs.values())
nonterminals = (list(self.nonterminals.values()) +
list(self.decoder_nonterminals.values()) +
list(self.decoder_ntlufs.values()))
for nt in nonterminals:
if nt.name in _complicated_nt:

View File

@@ -231,7 +231,7 @@ class operands_storage_t(object):
fo.add_arg('void* %s' % ret_arg)
switch_gen = codegen.c_switch_generator_t('operand',fo)
op_names = self.operand_fields.keys()
op_names = list(self.operand_fields.keys())
op_names.sort()
for op in op_names:
switch_key = "XED_OPERAND_%s" % op
@@ -260,7 +260,7 @@ class operands_storage_t(object):
fo.add_arg('xed_uint32_t %s' % in_value)
switch_gen = codegen.c_switch_generator_t('operand',fo)
op_names = self.operand_fields.keys()
op_names = list(self.operand_fields.keys())
op_names.sort()
for op in op_names:
switch_key = "XED_OPERAND_%s" % op
@@ -280,7 +280,7 @@ class operands_storage_t(object):
h_fname = get_operand_accessors_fn()
c_fname = h_fname.replace('.h', '.c')
for opname in self.operand_fields.keys():
for opname in list(self.operand_fields.keys()):
getter_fo = self._gen_op_getter_fo(opname)
setter_fo = self._gen_op_setter_fo(opname)
fo_list.append(getter_fo)
@@ -329,7 +329,7 @@ class operands_storage_t(object):
# mx_bits is a mapping from enum name to the minimal number
# of bits required to represent it
max_bits_for_enum = self._gen_max_bits_per_enum(agi.all_enums)
for op in self.operand_fields.values():
for op in list(self.operand_fields.values()):
if op.ctype in max_bits_for_enum:
needed_bits = max_bits_for_enum[op.ctype]
if op.bitwidth < needed_bits:
@@ -347,7 +347,7 @@ class operands_storage_t(object):
the accessors will cast the operand to its C type according to the
data files'''
for op in self.operand_fields.values():
for op in list(self.operand_fields.values()):
width = op.bitwidth
if width <= 8:
op.storage_type = 'xed_uint8_t'
@@ -387,7 +387,7 @@ class operands_storage_t(object):
if self.compressed:
self.bins = self._compress_operands()
operands = self.operand_fields.values()
operands = list(self.operand_fields.values())
un_compressed = list(filter(lambda x: x.compressed == False, operands ))
un_compressed.sort(cmp=cmp_operands)
@@ -403,7 +403,7 @@ class operands_storage_t(object):
bit_width=op.bitwidth, accessors='none')
else:
operands_sorted = self.operand_fields.values()
operands_sorted = list(self.operand_fields.values())
operands_sorted.sort(cmp=cmp_operands)
for op in operands_sorted:
cgen.add_var(op.name.lower(), op.storage_type,
@@ -430,7 +430,7 @@ class operands_storage_t(object):
''' calculate the number of bits required to capture the each enum.
returning a dict of enum name to the number of required bits '''
widths = {}
for (enum_name, values_list) in all_enums.items():
for (enum_name, values_list) in list(all_enums.items()):
num_values = self._get_num_elements_in_enum(values_list)
log2 = math.log(num_values,2)
needed_bits = int(math.ceil(log2))
@@ -447,7 +447,7 @@ class operands_storage_t(object):
their ctype can hold. '''
candiadtes = []
for op in self.operand_fields.values():
for op in list(self.operand_fields.values()):
# for optimization those operands are not using bit with
# FIXME: add field to the operands for excluding hot fields
# form being compressed

View File

@@ -184,7 +184,7 @@ class parse_opmap_t(object):
self.read_line(line)
def dump(self):
for g,v in self.groups.iteritems():
for g,v in self.groups.items():
print(g, ": ")
v.dump()
print("\n\n")

View File

@@ -54,7 +54,7 @@ def write_table(agi,ots):
'xed-init-operand-type-mappings.c')
fp.start()
fp.add_code("const xed_operand_type_info_t xed_operand_xtype_info[] = {")
names = ots.keys()
names = list(ots.keys())
names.sort()
names = ['INVALID'] + names
ots['INVALID'] = operand_type_t('INVALID','INVALID','0')
@@ -68,7 +68,7 @@ def write_table(agi,ots):
def write_enum(agi,ots):
"""Emit the xtypes enum"""
names = ots.keys()
names = list(ots.keys())
names.sort()
names = ['INVALID'] + names
width_enum = enum_txt_writer.enum_info_t(names,

View File

@@ -383,7 +383,7 @@ def parse_one_operand(w,
xtype = default_xtypes[oc2.upper()]
except:
s = ''
for i,v in default_xtypes.iteritems():
for i,v in default_xtypes.items():
s += "\t%10s -> %10s\n" % (i,v)
genutil.die("Parsing operand [%s]. Could not find default type for %s. xtypes=%s\nTypes=%s" % (w, oc2, str(xtypes), s))
else:

View File

@@ -664,7 +664,7 @@ class iform_builder_t(object):
self.iforms[ntname] = True
def _build(self):
self.cgen = c_class_generator_t("xed_encoder_iforms_t", var_prefix="x_")
for v in self.iforms.iterkeys():
for v in self.iforms.keys():
self.cgen.add_var(v, 'xed_uint32_t', accessors='none')
def emit_header(self):
self._build()
@@ -1867,7 +1867,7 @@ class encoder_configuration_t(object):
def reorder_encoder_rules(self,nts):
"""reorder rules so that any rules with ENCODER_PREFERRED is first
"""
for nt in nts.itervalues():
for nt in nts.values():
first_rules = []
rest_of_the_rules = []
for r in nt.rules:
@@ -2327,9 +2327,9 @@ class encoder_configuration_t(object):
def remove_deleted(self):
bad = self.deleted_unames.keys()
bad = list(self.deleted_unames.keys())
_vmsgb("BAD UNAMES", str(bad))
for ic,v in self.iarray.iteritems():
for ic,v in self.iarray.items():
x1 = len(v)
l = []
for i in v:
@@ -2342,7 +2342,7 @@ class encoder_configuration_t(object):
_vmsgb("DELETING IFORMS", "%s %d -> %d" % (ic,x1,x2))
self.iarray[ic]=l
for k in self.deleted_instructions.keys():
for k in list(self.deleted_instructions.keys()):
if k in self.iarray:
_vmsgb("DELETING", k)
del self.iarray[k]
@@ -2353,7 +2353,7 @@ class encoder_configuration_t(object):
all_iforms_list = []
i = 0
for iforms in self.iarray.itervalues():
for iforms in self.iarray.values():
for iform in iforms:
iform.rule.iform_id = i
all_iforms_list.append(iform)
@@ -2386,10 +2386,10 @@ class encoder_configuration_t(object):
self.reorder_encoder_rules(ntlufs)
if vread():
msgb("NONTERMINALS")
for nt in nts.itervalues():
for nt in nts.values():
msg( str(nt))
msgb("NTLUFS")
for ntluf in ntlufs.itervalues():
for ntluf in ntlufs.values():
msg( str(ntluf))
_vmsgb("DONE","\n\n")
@@ -2531,13 +2531,13 @@ class encoder_configuration_t(object):
template = " xed_enc_iclass2group[XED_ICLASS_%s] = %d;"
iclass2group = self.ins_groups.get_iclass2group()
for iclass,group_index in iclass2group.items():
for iclass,group_index in list(iclass2group.items()):
code = template % (iclass.upper(),group_index)
init_table.append(code)
template = " xed_enc_iclass2index_in_group[XED_ICLASS_%s] = %d;"
iclass2index_in_group = self.ins_groups.get_iclass2index_in_group()
for iclass,index in iclass2index_in_group.items():
for iclass,index in list(iclass2index_in_group.items()):
code = template % (iclass.upper(),index)
init_table.append(code)
fo.add_lines(init_table)
@@ -2752,7 +2752,7 @@ class encoder_configuration_t(object):
def look_for_encoder_inputs(self):
encoder_inputs_by_iclass = {} # dictionary mapping iclass -> set of field names
encoder_nts_by_iclass = {} # dictionary mapping iclass -> set of nt names
for iclass,iform_list in self.iarray.iteritems():
for iclass,iform_list in self.iarray.items():
encoder_field_inputs = set()
encoder_nts = set()
for iform in iform_list:
@@ -2766,7 +2766,7 @@ class encoder_configuration_t(object):
encoder_inputs_by_iclass[iclass] = encoder_field_inputs
encoder_nts_by_iclass[iclass] = encoder_nts
for iclass in encoder_inputs_by_iclass.keys():
for iclass in list(encoder_inputs_by_iclass.keys()):
fld_set = encoder_inputs_by_iclass[iclass]
nt_set = encoder_nts_by_iclass[iclass]
if vinputs():
@@ -2817,7 +2817,7 @@ class encoder_configuration_t(object):
this dictionary in each iform as iform.operand_order"""
all_operand_name_list_dict = {}
for iclass,iform_list in self.iarray.iteritems():
for iclass,iform_list in self.iarray.items():
for niform,iform in enumerate(iform_list):
ordered_operand_name_list = iform.make_operand_name_list()
key = "-".join(ordered_operand_name_list)
@@ -2832,7 +2832,7 @@ class encoder_configuration_t(object):
_vmsgb("TOTAL ENCODE OPERAND SEQUENCES: %d" % (len(all_operand_name_list_dict)))
if vopseq():
for iclass,iform_list in self.iarray.iteritems():
for iclass,iform_list in self.iarray.items():
for niform,iform in enumerate(iform_list):
msg("OPSEQ: %20s-%03d: %s" %
(iclass, niform+1,
@@ -2845,7 +2845,7 @@ class encoder_configuration_t(object):
fo = function_object_t(fname, 'void')
operands = 0 # columns
entries = 0 # rows
for oo in all_operand_name_list_dict.itervalues(): # stringkeys -> operand_order_t's
for oo in all_operand_name_list_dict.values(): # stringkeys -> operand_order_t's
for j,o in enumerate(oo.lst):
fo.add_code_eol("xed_encode_order[%d][%d]=XED_OPERAND_%s" % (oo.n,j,o))
t = len(oo.lst)
@@ -2859,17 +2859,17 @@ class encoder_configuration_t(object):
def dump(self):
msgb("NONTERMINALS")
for nt in self.nonterminals.itervalues():
for nt in self.nonterminals.values():
msg(str(nt))
msgb("SEQUENCERS")
for s in self.sequences.itervalues():
for s in self.sequences.values():
msg(str(s))
def make_sequence_functions(self):
# we pass in the list of known sequences so that we know to
# call the right kind of function from the sequence function
# we are creating.
for s in self.sequences.itervalues():
for s in self.sequences.values():
fo = s.create_function(self.sequences)
self.functions.append(fo)
@@ -2878,7 +2878,7 @@ class encoder_configuration_t(object):
NTLUF. One version does the required bindings. The other
version emits the required bytes"""
for nt in nts.itervalues():
for nt in nts.values():
_vmsgb("SORTING FOR SIZE", nt.name)
nt.sort_for_size()
if nt.is_ntluf():

View File

@@ -370,7 +370,7 @@ class xed_reader_t(object):
res = []
for i in range(0,x):
d = inst_t()
for k,v in in_rec.iteritems():
for k,v in in_rec.items():
if len(v) == 1:
setattr(d,k.lower(),v[0])
else:
@@ -385,7 +385,7 @@ class xed_reader_t(object):
"""The valies in the record are lists. Remove the lists since they are
all now singletons """
n = inst_t()
for k,v in in_rec.iteritems():
for k,v in in_rec.items():
setattr(n,k.lower(),v[0])
return n

View File

@@ -111,7 +111,7 @@ class parse_regmap_t(object):
self.read_line(line)
def dump(self):
for g,v in self.regmaps.iteritems():
for g,v in self.regmaps.items():
print(g, ": ")
v.dump()
print("\n\n")

View File

@@ -80,11 +80,11 @@ def dump_nt_enum_2_capture_fptr(agi, fname):
h_file.add_code(('static %s ' % xed3_capture_f_t) +\
'%s[XED_NONTERMINAL_LAST] = {' % lu_name)
nonterminals = agi.nonterminal_dict.keys()
nonterminals = list(agi.nonterminal_dict.keys())
invalid_line = '/*XED_NONTERMINAL_INVALID*/ (%s)0,' % xed3_capture_f_t
h_file.add_code(invalid_line)
for nt_name in agi.xed3_nt_enum_val_map.values():
for nt_name in list(agi.xed3_nt_enum_val_map.values()):
enum_val = 'XED_NONTERMINAL_%s' % nt_name.upper()
if _skip_nt(nt_name):
fn = '0'
@@ -105,7 +105,7 @@ def get_ii_constraints(ii, state_space, constraints):
#set constraints that come from operands deciders
ild_nt.add_op_deciders(ii.ipattern, state_space, constraints)
#set constraints that come from prebindings
for name,binding in ii.prebindings.items():
for name,binding in list(ii.prebindings.items()):
if binding.is_constant():
if name not in constraints:
constraints[name] = {}
@@ -119,7 +119,7 @@ def _get_all_cnames(gi):
"""
cnames = []
for rule in gi.parser_output.instructions:
cnames.extend(rule.xed3_constraints.keys())
cnames.extend(list(rule.xed3_constraints.keys()))
return set(cnames)
def _gen_cdict(agi, nt_name, all_state_space):
@@ -132,7 +132,7 @@ def _gen_cdict(agi, nt_name, all_state_space):
state_space = {}
for opname in all_state_space:
state_space[opname] = all_state_space[opname].keys()
state_space[opname] = list(all_state_space[opname].keys())
cdict_list = []
@@ -305,7 +305,7 @@ def _add_switchcase_lines(fo,
int2key = {}
key2int = {}
for key in cdict.tuple2rule.keys():
for key in list(cdict.tuple2rule.keys()):
keyval = tup2int.tuple2int(key, cdict.cnames, all_ops_widths)
#This checks for a nasty conflict that should never happen:
#when two different tuple keys have the same integer value.
@@ -570,7 +570,7 @@ def _dump_op_capture_chain_fo_lu(agi, patterns):
ild_codegen.dump_flist_2_header(agi,
_xed3_op_chain_header,
headers,
fn_2_fo.values(),
list(fn_2_fo.values()),
is_private=True)
lu_size = max(inum_2_fn.keys()) + 1
@@ -639,7 +639,7 @@ def _dump_capture_chain_fo_lu(agi, patterns):
ild_codegen.dump_flist_2_header(agi,
_xed3_chain_header,
headers,
fn_2_fo.values(),
list(fn_2_fo.values()),
is_private=True)
lu_size = max(inum_2_fn.keys()) + 1
@@ -746,7 +746,7 @@ def work(agi, all_state_space, all_ops_widths, patterns):
#generate NT capturing functions
capture_fn_list = []
for nt_name in agi.nonterminal_dict.keys():
for nt_name in list(agi.nonterminal_dict.keys()):
#skip non terminals that we don't want to capture:
#PREFIXES, AVX_SPLITTER, *ISA, etc.
if _skip_nt(nt_name):

View File

@@ -51,7 +51,7 @@ def _measure_bucket_max(table, maxbin):
okay = True
max_bucket = 0
bad_buckets = 0
for k, vl in table.iteritems():
for k, vl in table.items():
lvl = len(vl)
if lvl >= maxbin:
if lvl > max_bucket:
@@ -63,7 +63,7 @@ def _measure_bucket_max(table, maxbin):
def is_well_distributed(keylist, hash_f, maxbin):
"""populate the buckets and see if any are too big"""
table = collections.defaultdict(list)
for t,x in keylist.iteritems():
for t,x in keylist.items():
hash_val = hash_f.apply(x)
table[hash_val].append(t)