Index: examples/run_to_csv.py
===================================================================
--- examples/run_to_csv.py
+++ examples/run_to_csv.py
@@ -7,12 +7,12 @@
 data = json.load(sys.stdin)
-titles = data['tests'].itervalues().next().keys()
+titles = list(next(iter(data['tests'].values())).keys())
 titles.insert(0, titles.pop(titles.index("name")))
 print(", ".join(titles))
-for i, result in data['tests'].items():
+for i, result in list(data['tests'].items()):
     for title in titles:
         print(result[title], end=' ')
Index: lnt/lnttool/admin.py
===================================================================
--- lnt/lnttool/admin.py
+++ lnt/lnttool/admin.py
@@ -29,7 +29,7 @@
     def __init__(self, **args):
         self._set('verbose', args['verbose'])
         self._try_load_config(args['config'])
-        for key, value in args.items():
+        for key, value in list(args.items()):
            self._set(key, value)
         self._check_and_normalize()
@@ -47,7 +47,7 @@
     def _try_load_config(self, filename):
         try:
             config = yaml.load(open(filename))
-            for key, value in config.items():
+            for key, value in list(config.items()):
                 self._set(key, value)
         except IOError as e:
             if self.verbose or filename != _default_config_filename:
Index: lnt/server/config.py
===================================================================
--- lnt/server/config.py
+++ lnt/server/config.py
@@ -124,7 +124,7 @@
             dict([(k, DBInfo.from_data(dbDirPath, v, default_email_config, 0))
-                  for k, v in data['databases'].items()]),
+                  for k, v in list(data['databases'].items())]),
             blacklist, schemasDir, api_auth_token)
     @staticmethod
@@ -171,7 +171,7 @@
         while self.zorgURL.endswith('/'):
             self.zorgURL = zorgURL[:-1]
         self.databases = databases
-        for db in self.databases.values():
+        for db in list(self.databases.values()):
             db.config = self
         self.api_auth_token = api_auth_token
@@ -191,4 +191,4 @@
             db_entry.baseline_revision)
     def get_database_names(self):
-        return self.databases.keys()
+        return list(self.databases.keys())
Index: lnt/server/db/rules/rule_update_fixed_regressions.py
===================================================================
--- lnt/server/db/rules/rule_update_fixed_regressions.py
+++ lnt/server/db/rules/rule_update_fixed_regressions.py
@@ -75,7 +75,7 @@
         if current is None or current < order_id:
             regression_newest_change[regression_id] = order_id
     # Order regressions by FC end order.
-    ordered = sorted(regression_newest_change.items(), key=lambda x: x[1])
+    ordered = sorted(list(regression_newest_change.items()), key=lambda x: x[1])
     to_move = ordered[0:(-1 * num_to_keep)]
     for r, _ in to_move:
Index: lnt/server/db/rules_manager.py
===================================================================
--- lnt/server/db/rules_manager.py
+++ lnt/server/db/rules_manager.py
@@ -64,11 +64,11 @@
     and load them into the hook dict for later use.
    """
    global HOOKS_LOADED
-    for name, path in load_rules().items():
+    for name, path in list(load_rules().items()):
        globals = {}
        execfile(path, globals)
        DESCRIPTIONS[name] = globals['__doc__']
-        for hook_name in HOOKS.keys():
+        for hook_name in list(HOOKS.keys()):
            if hook_name in globals:
                HOOKS[hook_name].append(globals[hook_name])
    HOOKS_LOADED = True
Index: lnt/server/db/testsuite.py
===================================================================
--- lnt/server/db/testsuite.py
+++ lnt/server/db/testsuite.py
@@ -402,7 +402,7 @@
     if len(old_metrics) != 0:
         raise _MigrationError("Metrics removed: %s" %
-                              ", ".join(old_metrics.keys()))
+                              ", ".join(list(old_metrics.keys())))
     old_run_fields = {}
     old_order_fields = {}
@@ -429,10 +429,10 @@
     if len(old_run_fields) > 0:
         raise _MigrationError("Run fields removed: %s" %
-                              ", ".join(old_run_fields.keys()))
+                              ", ".join(list(old_run_fields.keys())))
     if len(old_order_fields) > 0:
         raise _MigrationError("Order fields removed: %s" %
-                              ", ".join(old_order_fields.keys()))
+                              ", ".join(list(old_order_fields.keys())))
     old_machine_fields = {}
     for field_desc in old.get('machine_fields', []):
@@ -449,7 +449,7 @@
     if len(old_machine_fields) > 0:
         raise _MigrationError("Machine fields removed: %s" %
-                              ", ".join(old_machine_fields.keys()))
+                              ", ".join(list(old_machine_fields.keys())))
     # The rest should just be metadata that we can upgrade
     return True
Index: lnt/server/db/testsuitedb.py
===================================================================
--- lnt/server/db/testsuitedb.py
+++ lnt/server/db/testsuitedb.py
@@ -27,7 +27,7 @@
 def _dict_update_abort_on_duplicates(base_dict, to_merge):
     '''This behaves like base_dict.update(to_merge) but asserts that none
     of the keys in to_merge is present in base_dict yet.'''
-    for key, value in to_merge.items():
+    for key, value in list(to_merge.items()):
         assert base_dict.get(key, None) is None
         base_dict[key] = value
@@ -420,7 +420,7 @@
             p = profile.Profile.fromRendered(encoded)
             s = ','.join('%s=%s' % (k, v)
-                         for k, v in p.getTopLevelCounters().items())
+                         for k, v in list(p.getTopLevelCounters().items()))
             self.counters = s[:512]
         def getTopLevelCounters(self):
@@ -753,7 +753,7 @@
             if existing_value is not None:
                 incompatible_fields.add(field.name)
         existing_parameters = existing_machine.parameters
-        for key, new_value in new_machine.parameters.items():
+        for key, new_value in list(new_machine.parameters.items()):
             existing_value = existing_parameters.get(key, None)
             if new_value is None or existing_value == new_value:
                 continue
@@ -768,7 +768,7 @@
                 continue
             existing_machine.set_field(field, new_value)
         parameters = existing_machine.parameters
-        for key, new_value in new_machine.parameters.items():
+        for key, new_value in list(new_machine.parameters.items()):
             if new_value is None and parameters.get(key, None) is not None:
                 continue
             parameters[key] = new_value
@@ -997,7 +997,7 @@
             session.add(test)
         samples = []
-        for key, values in test_data.items():
+        for key, values in list(test_data.items()):
             if key == 'name' or key == "id" or key.endswith("_id"):
                 continue
             field = field_dict.get(key)
Index: lnt/server/reporting/analysis.py
===================================================================
--- lnt/server/reporting/analysis.py
+++ lnt/server/reporting/analysis.py
@@ -258,7 +258,7 @@
     @property
     def test_ids(self):
-        return set(key[1] for key in self.sample_map.keys())
+        return set(key[1] for key in list(self.sample_map.keys()))
     def get_sliding_runs(self, session, run, compare_run, num_comparison_runs=0):
Index: lnt/server/reporting/dailyreport.py
===================================================================
--- lnt/server/reporting/dailyreport.py
+++ lnt/server/reporting/dailyreport.py
@@ -129,7 +129,7 @@
             # Create a map from machine to max order and some history.
             self.prior_days_machine_order_map[i] = machine_order_map = dict(
                 (machine, OrderAndHistory(max(orders), sorted(orders)))
-                for machine, orders in machine_to_all_orders.items())
+                for machine, orders in list(machine_to_all_orders.items()))
             # Update the run list to only include the runs with that order.
             def is_max_order(r):
Index: lnt/server/reporting/summaryreport.py
===================================================================
--- lnt/server/reporting/summaryreport.py
+++ lnt/server/reporting/summaryreport.py
@@ -123,7 +123,7 @@
                                           for ts in self.testsuites)
         self.requested_machine_ids = dict(
             (ts, [m.id for m in machines])
-            for ts, machines in self.requested_machines.items()
+            for ts, machines in list(self.requested_machines.items())
         )
         # First, collect all the runs to summarize on, for each index in the
@@ -342,7 +342,7 @@
         columns = [ts.Sample.run_id, ts.Sample.test_id]
         columns.extend(f.column for f in ts.sample_fields)
         samples = session.query(*columns).filter(
-            ts.Sample.run_id.in_(run_id_map.keys()))
+            ts.Sample.run_id.in_(list(run_id_map.keys())))
         for sample in samples:
             run = run_id_map[sample[0]]
             datapoints = list()
@@ -421,7 +421,7 @@
             return True
         self.indexed_data_table = {}
-        for key, values in self.data_table.items():
+        for key, values in list(self.data_table.items()):
             # Ignore any test which is missing some data.
             if is_missing_samples(values):
                 self.warnings.append("missing values for %r" % (key,))
@@ -444,7 +444,7 @@
     def _build_normalized_data_table(self):
         self.normalized_data_table = {}
-        for key, indexed_value in self.indexed_data_table.items():
+        for key, indexed_value in list(self.indexed_data_table.items()):
             test_name, metric, build_mode, arch, machine_id = key
             if test_name.startswith('Single File'):
                 aggr = Mean
@@ -463,7 +463,7 @@
     def _build_final_data_tables(self):
         self.grouped_table = {}
         self.single_file_table = {}
-        for key, normalized_value in self.normalized_data_table.items():
+        for key, normalized_value in list(self.normalized_data_table.items()):
             test_name, metric, build_mode, arch = key
             # If this isn't a single file test, add a plot for it grouped by
Index: lnt/server/ui/api.py
===================================================================
--- lnt/server/ui/api.py
+++ lnt/server/ui/api.py
@@ -407,7 +407,7 @@
             .filter(ts.Sample.run_id.in_(run_ids))
         result = common_fields_factory()
         # noinspection PyProtectedMember
-        result['samples'] = [{k: v for k, v in sample.items() if v is not None}
+        result['samples'] = [{k: v for k, v in list(sample.items()) if v is not None}
                              for sample in [sample._asdict() for sample in q.all()]]
Index: lnt/server/ui/filters.py
===================================================================
--- lnt/server/ui/filters.py
+++ lnt/server/ui/filters.py
@@ -73,6 +73,6 @@
 def register(env):
-    for name, object in globals().items():
+    for name, object in list(globals().items()):
         if name.startswith('filter_'):
             env.filters[name[7:]] = object
Index: lnt/server/ui/profile_views.py
===================================================================
--- lnt/server/ui/profile_views.py
+++ lnt/server/ui/profile_views.py
@@ -73,7 +73,7 @@
     if sample and sample.profile:
         p = sample.profile.load(profileDir)
-        return json.dumps([[n, f] for n, f in p.getFunctions().items()])
+        return json.dumps([[n, f] for n, f in list(p.getFunctions().items())])
     else:
         abort(404)
@@ -93,13 +93,13 @@
         sample = _get_sample(session, ts, rid, testid)
         if sample and sample.profile:
             p = sample.profile.load(profileDir)
-            for k, v in p.getTopLevelCounters().items():
+            for k, v in list(p.getTopLevelCounters().items()):
                 tlc.setdefault(k, [None]*len(runids))[idx] = v
         idx += 1
     # If the 1'th counter is None for all keys, truncate the list.
-    if all(len(k) > 1 and k[1] is None for k in tlc.values()):
-        tlc = {k: [v[0]] for k, v in tlc.items()}
+    if all(len(k) > 1 and k[1] is None for k in list(tlc.values())):
+        tlc = {k: [v[0]] for k, v in list(tlc.items())}
     return json.dumps(tlc)
Index: lnt/server/ui/regression_views.py
===================================================================
--- lnt/server/ui/regression_views.py
+++ lnt/server/ui/regression_views.py
@@ -256,7 +256,7 @@
     title = StringField(u'Title', validators=[DataRequired()])
    bug = StringField(u'Bug', validators=[DataRequired()])
    field_changes = MultiCheckboxField("Changes", coerce=int)
-    choices = RegressionState.names.items()
+    choices = list(RegressionState.names.items())
    state = SelectField(u'State', choices=choices)
    edit_state = HiddenField(u'EditState', validators=[DataRequired()])
Index: lnt/server/ui/util.py
===================================================================
--- lnt/server/ui/util.py
+++ lnt/server/ui/util.py
@@ -106,7 +106,7 @@
         return pprintArgs('set', list(object))
     elif isinstance(object, dict):
         elts = []
-        for k, v in object.items():
+        for k, v in list(object.items()):
             kr = recur(k)
             vr = recur(v)
             elts.append('%s : %s' % (kr,
@@ -196,7 +196,7 @@
         if class_ is not None:
             attrs.append('class="%s"' % (class_,))
         if attributes is not None:
-            for key, value in attributes.items():
+            for key, value in list(attributes.items()):
                 attrs.append('%s="%s"' % (key, value))
         attr_string = ' '.join(attrs)
         if self.data:
Index: lnt/server/ui/views.py
===================================================================
--- lnt/server/ui/views.py
+++ lnt/server/ui/views.py
@@ -677,7 +677,7 @@
     # Convert the old style test parameters encoding.
     args = {'highlight_run': id}
     plot_number = 0
-    for name, value in request.args.items():
+    for name, value in list(request.args.items()):
         # If this isn't a test specification, just forward it.
         if not name.startswith('test.'):
             args[name] = value
@@ -798,7 +798,7 @@
     GraphParameter = namedtuple('GraphParameter',
                                 ['machine', 'test', 'field', 'field_index'])
     graph_parameters = []
-    for name, value in request.args.items():
+    for name, value in list(request.args.items()):
         # Plots to graph are passed as::
         #
         #  plot.<unused>=<machine id>.<test id>.<field index>
@@ -834,7 +834,7 @@
     # Extract requested mean trend.
     mean_parameter = None
-    for name, value in request.args.items():
+    for name, value in list(request.args.items()):
         # Mean to graph is passed as:
         #
         #  mean=<machine id>.<field index>
@@ -867,7 +867,7 @@
     # Extract requested baselines, and their titles.
     baseline_parameters = []
-    for name, value in request.args.items():
+    for name, value in list(request.args.items()):
         # Baselines to graph are passed as:
         #
         #  baseline.title=<run id>
@@ -951,8 +951,9 @@
                 (field.status_field.column.is_(None)))
         # Aggregate by revision.
-        data = multidict.multidict((rev, (val, date, run_id))
-                                   for val, rev, date, run_id in q).items()
+        data = list(multidict.multidict((rev, (val, date, run_id))
+                                        for val, rev, date, run_id in q)
+                    .items())
         data.sort(key=lambda sample: convert_revision(sample[0],
                                                       cache=revision_cache))
@@ -1013,8 +1014,8 @@
             .group_by(ts.Order.llvm_project_revision, ts.Test)
         # Calculate geomean of each revision.
-        data = multidict.multidict(
-            ((rev, date), val) for val, rev, date in q).items()
+        data = list(multidict.multidict(
+            ((rev, date), val) for val, rev, date in q).items())
         data = [(rev, [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
                 for ((rev, date), vals) in data]
@@ -1318,7 +1319,7 @@
             recent_runs_by_machine[run.machine] = run
     # Get a sorted list of recent machines.
-    recent_machines = sorted(recent_runs_by_machine.keys(),
+    recent_machines = sorted(list(recent_runs_by_machine.keys()),
                              key=lambda m: m.name)
     # We use periods in our machine names. css does not like this
@@ -1493,7 +1494,7 @@
     }
     # Get the list of available test suites.
-    testsuites = request.get_db().testsuite.values()
+    testsuites = list(request.get_db().testsuite.values())
     # Gather the list of all run orders and all machines.
     def to_key(name):
@@ -1556,7 +1557,7 @@
     json_obj = dict()
     json_obj['ticks'] = report.report_orders
     data = []
-    for e in report.normalized_data_table.items():
+    for e in list(report.normalized_data_table.items()):
         header, samples = e
         raw_samples = samples.getvalue()
         data.append([header, raw_samples])
@@ -1692,7 +1693,7 @@
     else:
         post_limit = MATRIX_LIMITS[0][0]
     data_parameters = []  # type: List[MatrixDataRequest]
-    for name, value in request.args.items():
+    for name, value in list(request.args.items()):
         # plot.<number>=<machine id>.<test id>.<field index>
         if not name.startswith(str('plot.')):
             continue
@@ -1771,7 +1772,7 @@
         all_orders.add(s[1])
         order_to_id[s[1]] = s[2]
     req.derive_stat = {}
-    for order, samples in req.samples.items():
+    for order, samples in list(req.samples.items()):
         req.derive_stat[order] = mean(samples)
     if not all_orders:
         abort(404, "No data found.")
Index: lnt/testing/__init__.py
===================================================================
--- lnt/testing/__init__.py
+++ lnt/testing/__init__.py
@@ -114,7 +114,7 @@
     def __init__(self, name, info={}, report_version=1):
         self.name = str(name)
         self.info = dict((str(key), str(value))
-                         for key, value in info.items())
+                         for key, value in list(info.items()))
         self.report_version = report_version
         self.check()
@@ -168,7 +168,7 @@
         self.end_time = normalize_time(end_time) if end_time is not None else None
         self.info = dict()
         # Convert keys/values that are not json encodable to strings.
-        for key, value in info.items():
+        for key, value in list(info.items()):
             key = str(key)
             value = str(value)
             self.info[key] = value
@@ -329,7 +329,7 @@
         """
         self.name = str(name)
         self.info = dict((str(key), str(value))
-                         for key, value in info.items())
+                         for key, value in list(info.items()))
         self.data = list(map(conv_f, data))
     def render(self):
@@ -552,7 +552,7 @@
     # Flatten Machine.Info into machine
     Machine = data['Machine']
     result_machine = {'name': Machine['Name']}
-    for key, value in Machine['Info'].items():
+    for key, value in list(Machine['Info'].items()):
         newname = upgrade.machine_param_rename.get(key, key)
         if newname in result_machine:
             raise ValueError("Name clash for machine info '%s'" % newname)
@@ -568,7 +568,7 @@
     end_time = Run.get('End Time')
     if end_time is not None:
         result_run['end_time'] = end_time
-    for key, value in Run['Info'].items():
+    for key, value in list(Run['Info'].items()):
         newname = upgrade.run_param_rename.get(key, key)
         if newname in result_run:
             raise ValueError("Name clash for run info '%s'" % newname)
@@ -594,7 +594,7 @@
             name_metric = test_Name[len(tag_dot):]
         found_metric = False
-        for oldname, newname in upgrade.metric_rename.items():
+        for oldname, newname in list(upgrade.metric_rename.items()):
             assert(oldname.startswith('.'))
             if name_metric.endswith(oldname):
                 name = name_metric[:-len(oldname)]
Index: lnt/testing/profile/perf.py
===================================================================
--- lnt/testing/profile/perf.py
+++ lnt/testing/profile/perf.py
@@ -31,12 +31,12 @@
         data = cPerf.importPerf(f, nm, objdump)
         # Go through the data and convert counter values to percentages.
-        for f in data['functions'].values():
+        for f in list(data['functions'].values()):
             fc = f['counters']
             for l in f['data']:
-                for k, v in l[0].items():
+                for k, v in list(l[0].items()):
                     l[0][k] = 100.0 * float(v) / fc[k]
-            for k, v in fc.items():
+            for k, v in list(fc.items()):
                 fc[k] = 100.0 * v / data['counters'][k]
         return ProfileV1(data)
Index: lnt/testing/profile/profile.py
===================================================================
--- lnt/testing/profile/profile.py
+++ lnt/testing/profile/profile.py
@@ -25,7 +25,7 @@
         """
         Load a profile from a file.
        """
-        for impl in lnt.testing.profile.IMPLEMENTATIONS.values():
+        for impl in list(lnt.testing.profile.IMPLEMENTATIONS.values()):
            if impl.checkFile(f):
                ret = impl.deserialize(open(f, 'rb'))
                if ret:
@@ -48,7 +48,7 @@
        fd.flush()
        fd.seek(0)
-        for impl in lnt.testing.profile.IMPLEMENTATIONS.values():
+        for impl in list(lnt.testing.profile.IMPLEMENTATIONS.values()):
            if impl.checkFile(fd.name):
                ret = impl.deserialize(fd)
                if ret:
Index: lnt/testing/profile/profilev2impl.py
===================================================================
--- lnt/testing/profile/profilev2impl.py
+++ lnt/testing/profile/profilev2impl.py
@@ -260,14 +260,14 @@
             self.idx_to_name[i] = readString(fobj)
         self.name_to_idx = {v: k
                             for k, v
-                            in self.idx_to_name.items()}
+                            in list(self.idx_to_name.items())}
     def upgrade(self, impl):
         self.idx_to_name = {}
-        keys = impl.getTopLevelCounters().keys()
-        for f in impl.getFunctions().values():
-            keys += f['counters'].keys()
+        keys = list(impl.getTopLevelCounters().keys())
+        for f in list(impl.getFunctions().values()):
+            keys += list(f['counters'].keys())
         keys = sorted(set(keys))
         self.idx_to_name = {k: v for k, v in enumerate(keys)}
@@ -312,7 +312,7 @@
         start = fobj.tell()
         for fname, f in sorted(self.impl.getFunctions().items()):
             self.function_offsets[fname] = fobj.tell() - start
-            all_counters = f['counters'].keys()
+            all_counters = list(f['counters'].keys())
             for counters, address, text in self.impl.getCodeForFunction(fname):
                 for k in sorted(all_counters):
                     writeFloat(fobj, counters.get(k, 0))
@@ -536,7 +536,7 @@
     def getCodeForFunction(self, fname):
         f = self.functions[fname]
         counter_gen = self.line_counters \
-            .extractForFunction(fname, f['counters'].keys())
+            .extractForFunction(fname, list(f['counters'].keys()))
         address_gen = self.line_addresses.extractForFunction(fname)
         text_gen = self.line_text.extractForFunction(fname)
         for n in xrange(f['length']):
Index: lnt/tests/compile.py
===================================================================
--- lnt/tests/compile.py
+++ lnt/tests/compile.py
@@ -1085,7 +1085,7 @@
     compile_test = CompileTest()
     opts = compile_test.opts
-    for key, value in kwargs.items():
+    for key, value in list(kwargs.items()):
         setattr(compile_test.opts, key, value)
     results = compile_test.run_test(compile_test.opts)
Index: lnt/tests/nt.py
===================================================================
--- lnt/tests/nt.py
+++ lnt/tests/nt.py
@@ -648,7 +648,7 @@
 def execute_nt_tests(test_log, make_variables, basedir, config):
     report_dir = config.report_dir
     common_args = ['make', '-k']
-    common_args.extend('%s=%s' % (k, v) for k, v in make_variables.items())
+    common_args.extend('%s=%s' % (k, v) for k, v in list(make_variables.items()))
     if config.only_test is not None:
         common_args.extend(['-C', config.only_test])
@@ -911,7 +911,7 @@
     print('%s: building test-suite tools' % (timestamp(), ),
           file=sys.stderr)
     args = ['make', 'tools']
-    args.extend('%s=%s' % (k, v) for k, v in make_variables.items())
+    args.extend('%s=%s' % (k, v) for k, v in list(make_variables.items()))
     build_tools_log_path = os.path.join(config.build_dir(iteration),
                                         'build-tools.log')
     build_tools_log = open(build_tools_log_path, 'w')
@@ -1250,7 +1250,7 @@
     # Grab old make invocation.
     mk_vars, _ = config.compute_run_make_variables()
     to_exec = ['make', '-k']
-    to_exec.extend('%s=%s' % (k, v) for k, v in mk_vars.items())
+    to_exec.extend('%s=%s' % (k, v) for k, v in list(mk_vars.items()))
     # We need to run the benchmark's makefile, not the global one.
     if config.only_test is not None:
@@ -1475,17 +1475,17 @@
         collated_results[test_name] = new_entry
     # Double check that all values are there for all tests.
-    for test in collated_results.values():
+    for test in list(collated_results.values()):
         test.check()
-    rerunable_benches = [x for x in collated_results.values()
+    rerunable_benches = [x for x in list(collated_results.values())
                          if x.is_rerunable()]
     rerunable_benches.sort(key=lambda x: x.name)
     # Now lets do the reruns.
     rerun_results = []
     summary = "Rerunning {} of {} benchmarks."
     logger.info(summary.format(len(rerunable_benches),
-                               len(collated_results.values())))
+                               len(list(collated_results.values()))))
     for i, bench in enumerate(rerunable_benches):
         logger.info("Rerunning: {} [{}/{}]".format(bench.name,
@@ -2045,7 +2045,7 @@
     _tools_check()
     nt = NTTest()
-    for key, value in kwargs.items():
+    for key, value in list(kwargs.items()):
         setattr(nt.opts, key, value)
     results = nt.run_test(nt.opts)
Index: lnt/tests/test_suite.py
===================================================================
--- lnt/tests/test_suite.py
+++ lnt/tests/test_suite.py
@@ -538,9 +538,9 @@
                 early_defs[key] = value
         cmake_cmd = ([cmake_cmd] +
-                     ['-D%s=%s' % (k, v) for k, v in early_defs.items()] +
+                     ['-D%s=%s' % (k, v) for k, v in list(early_defs.items())] +
                      cmake_flags + [self._test_suite_dir()] +
-                     ['-D%s=%s' % (k, v) for k, v in defs.items()])
+                     ['-D%s=%s' % (k, v) for k, v in list(defs.items())])
         if execute:
             self._check_call(cmake_cmd, cwd=path)
@@ -751,7 +751,7 @@
                     raw_name == self.opts.single_result + '.test':
                 env = {'status': is_pass}
                 if 'metrics' in test_data:
-                    for k, v in test_data['metrics'].items():
+                    for k, v in list(test_data['metrics'].items()):
                         env[k] = v
                         if k in LIT_METRIC_TO_LNT:
                             env[LIT_METRIC_TO_LNT[k]] = v
@@ -1145,7 +1145,7 @@
 def cli_action(*args, **kwargs):
     test_suite = TestSuiteTest()
-    for key, value in kwargs.items():
+    for key, value in list(kwargs.items()):
         setattr(test_suite.opts, key, value)
     results = test_suite.run_test(test_suite.opts)
Index: lnt/util/ImportData.py
===================================================================
--- lnt/util/ImportData.py
+++ lnt/util/ImportData.py
@@ -314,7 +314,7 @@
     print(file=out)
     print("Results", file=out)
     print("----------------", file=out)
-    for kind, count in result_kinds.items():
+    for kind, count in list(result_kinds.items()):
         print(kind, ":", count, file=out)
Index: lnt/util/multidict.py
===================================================================
--- lnt/util/multidict.py
+++ lnt/util/multidict.py
@@ -17,13 +17,13 @@
             self.data[key] = [value]
     def items(self):
-        return self.data.items()
+        return list(self.data.items())
     def values(self):
-        return self.data.values()
+        return list(self.data.values())
     def keys(self):
-        return self.data.keys()
+        return list(self.data.keys())
     def __len__(self):
         return len(self.data)
Index: lnt/util/wsgi_restart.py
===================================================================
--- lnt/util/wsgi_restart.py
+++ lnt/util/wsgi_restart.py
@@ -61,7 +61,7 @@
     while True:
         # Check modification times on all files in sys.modules.
-        for module in sys.modules.values():
+        for module in list(sys.modules.values()):
            if not hasattr(module, '__file__'):
                continue
            path = getattr(module, '__file__')
Index: tests/SharedInputs/FakeCompilers/fakecompiler.py
===================================================================
--- tests/SharedInputs/FakeCompilers/fakecompiler.py
+++ tests/SharedInputs/FakeCompilers/fakecompiler.py
@@ -153,7 +153,7 @@
 fake_compilers = dict((value.compiler_name, value)
-                      for key, value in locals().items()
+                      for key, value in list(locals().items())
                       if inspect.isclass(value) and
                       issubclass(value, FakeCompiler))
@@ -184,7 +184,7 @@
 fake_compilers = dict((value.compiler_name, value)
-                      for key, value in locals().items()
+                      for key, value in list(locals().items())
                       if inspect.isclass(value) and
                       issubclass(value, FakeCompiler))
Index: tests/server/ui/V4Pages.py
===================================================================
--- tests/server/ui/V4Pages.py
+++ tests/server/ui/V4Pages.py
@@ -111,7 +111,7 @@
     try:
         parser = ET.XMLParser()
         parser.parser.UseForeignDTD(True)
-        parser.entity.update((x, unichr(i)) for x, i in name2codepoint.iteritems())
+        parser.entity.update((x, unichr(i)) for x, i in name2codepoint.items())
         tree = ET.fromstring(html_string, parser=parser)
     except:  # noqa FIXME: figure out what we expect this to throw.
         dump_html(html_string)
Index: tests/server/ui/test_roundtrip.py
===================================================================
--- tests/server/ui/test_roundtrip.py
+++ tests/server/ui/test_roundtrip.py
@@ -89,7 +89,7 @@
         before_submit_run['run']['order_id'] = an_id
         after_submit_run['run']['order_id'] = an_id
-        self.assertEqual(before_submit_run.keys(), after_submit_run.keys())
+        self.assertEqual(list(before_submit_run.keys()), list(after_submit_run.keys()))
         # Machine and run will be dicts, compare them directly.
         for k in ['machine', 'run']:
             self.assertEqual(before_submit_run[k], after_submit_run[k])
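
Note on the recurring pattern in this patch: in Python 3, dict.keys(), dict.values()
and dict.items() return view objects rather than lists, and the Python 2 iterator
method .next() was replaced by the next() builtin. Wrapping a view in list() is only
required when the result is indexed, sorted, mutated, or iterated while the
underlying dict changes; plain read-only iteration can use the view directly. A
minimal sketch of the idioms applied above (the sample dictionary is illustrative
only, not data from LNT):

    # Python 3 replacements for the Python 2 idioms touched in this patch.
    tests = {'a': {'name': 'a', 'compile_time': 1.0}}

    # Python 2 wrote tests.itervalues().next(); use the next() builtin instead.
    first = next(iter(tests.values()))

    # .keys() is a view; materialize it because insert()/pop() need a real list.
    titles = list(first.keys())
    titles.insert(0, titles.pop(titles.index('name')))

    # Snapshot with list() only if the dict may be modified during the loop;
    # otherwise iterating the view itself is fine.
    for key, value in list(tests.items()):
        print(key, value)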