Index: lnt/trunk/lnt/external/stats/pstat.py
===================================================================
--- lnt/trunk/lnt/external/stats/pstat.py
+++ lnt/trunk/lnt/external/stats/pstat.py
@@ -105,6 +105,7 @@
 ##
 ## 11/08/98 ... fixed aput to output large arrays correctly

+from __future__ import print_function
 import stats  # required 3rd party module
 import string, copy
 from types import *
@@ -514,14 +515,14 @@
             maxsize[col] = max(map(len,items)) + extra
     for row in lst:
         if row == ['\n'] or row == '\n' or row == '' or row == ['']:
-            print
+            print()
         elif row == ['dashes'] or row == 'dashes':
             dashes = [0]*len(maxsize)
             for j in range(len(maxsize)):
                 dashes[j] = '-'*(maxsize[j]-2)
-            print lineincustcols(dashes,maxsize)
+            print(lineincustcols(dashes, maxsize))
         else:
-            print lineincustcols(row,maxsize)
+            print(lineincustcols(row, maxsize))
     return None
@@ -534,7 +535,7 @@
 Returns: None
 """
     for row in listoflists:
-        print lineincols(row,colsize)
+        print(lineincols(row, colsize))
     return None


@@ -547,9 +548,9 @@
 """
     for row in listoflists:
         if row[-1] == '\n':
-            print row,
+            print(row, end=' ')
         else:
-            print row
+            print(row)
     return None


Index: lnt/trunk/lnt/external/stats/stats.py
===================================================================
--- lnt/trunk/lnt/external/stats/stats.py
+++ lnt/trunk/lnt/external/stats/stats.py
@@ -48,8 +48,8 @@
 argument types require different functions to be called.  Having
 implementated the Dispatch class, however, means that to get info on
 a given function, you must use the REAL function name ... that is
-"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
-while "print stats.mean.__doc__" will print the doc for the Dispatch
+"print(stats.lmean.__doc__)" or "print(stats.amean.__doc__)" work fine,
+while "print(stats.mean.__doc__)" will print the doc for the Dispatch
 class.  NUMPY FUNCTIONS ('a' prefix) generally have more argument
 options but should otherwise be consistent with the corresponding
 list functions.
@@ -222,6 +222,7 @@
 ## changed name of skewness and askewness to skew and askew
 ## fixed (a)histogram (which sometimes counted points <lowerreallimit)

+from __future__ import print_function
 import pstat               # required 3rd party module
 import math, string, copy  # required python modules
 from types import *
@@ -496,7 +497,7 @@
 Usage:   lscoreatpercentile(inlist,percent)
 """
     if percent > 1:
-        print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
+        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
         percent = percent / 100.0
     targetcf = percent*len(inlist)
     h, lrl, binsize, extras = histogram(inlist)
@@ -535,7 +536,7 @@
         except:
             extrapoints = extrapoints + 1
     if (extrapoints > 0 and printextras == 1):
-        print '\nPoints outside given histogram range =',extrapoints
+        print('\nPoints outside given histogram range =', extrapoints)
     return (bins, lowerreallimit, binsize, extrapoints)


@@ -777,11 +778,11 @@
 """
     samples = ''
     while samples not in ['i','r','I','R','c','C']:
-        print '\nIndependent or related samples, or correlation (i,r,c): ',
+        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
         samples = raw_input()

     if samples in ['i','I','r','R']:
-        print '\nComparing variances ...',
+        print('\nComparing variances ...', end=' ')
         # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
         r = obrientransform(x,y)
         f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
@@ -789,45 +790,45 @@
             vartype='unequal, p='+str(round(p,4))
         else:
             vartype='equal'
-        print vartype
+        print(vartype)
         if samples in ['i','I']:
             if vartype[0]=='e':
                 t,p = ttest_ind(x,y,0)
-                print '\nIndependent samples t-test: ', round(t,4),round(p,4)
+                print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
             else:
                 if len(x)>20 or len(y)>20:
                     z,p = ranksums(x,y)
-                    print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
+                    print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
                 else:
                     u,p = mannwhitneyu(x,y)
-                    print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
+                    print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
         else:  # RELATED SAMPLES
             if vartype[0]=='e':
                 t,p = ttest_rel(x,y,0)
-                print '\nRelated samples t-test: ', round(t,4),round(p,4)
+                print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
             else:
                 t,p = ranksums(x,y)
-                print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
+                print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
     else:  # CORRELATION ANALYSIS
         corrtype = ''
         while corrtype not in ['c','C','r','R','d','D']:
-            print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
+            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
             corrtype = raw_input()
         if corrtype in ['c','C']:
             m,b,r,p,see = linregress(x,y)
-            print '\nLinear regression for continuous variables ...'
+            print('\nLinear regression for continuous variables ...')
             lol = [['Slope','Intercept','r','Prob','SEestimate'],
                    [round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
             pstat.printcc(lol)
         elif corrtype in ['r','R']:
             r,p = spearmanr(x,y)
-            print '\nCorrelation for ranked variables ...'
-            print "Spearman's r: ",round(r,4),round(p,4)
+            print('\nCorrelation for ranked variables ...')
+            print("Spearman's r: ", round(r, 4), round(p, 4))
         else:  # DICHOTOMOUS
             r,p = pointbiserialr(x,y)
-            print '\nAssuming x contains a dichotomous variable ...'
-            print 'Point Biserial r: ',round(r,4),round(p,4)
-    print '\n\n'
+            print('\nAssuming x contains a dichotomous variable ...')
+            print('Point Biserial r: ', round(r, 4), round(p, 4))
+    print('\n\n')
     return None


@@ -1501,7 +1502,7 @@
         bz = 1.0
         if (abs(az-aold)<(EPS*abs(az))):
             return az
-    print 'a or b too big, or ITMAX too small in Betacf.'
+    print('a or b too big, or ITMAX too small in Betacf.')


 def lgammln(xx):
@@ -1821,11 +1822,11 @@
     lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
                   [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
     if type(fname)<>StringType or len(fname)==0:
-        print
-        print statname
-        print
+        print()
+        print(statname)
+        print()
         pstat.printcc(lofl)
-        print
+        print()
         try:
             if stat.shape == ():
                 stat = stat[0]
@@ -1833,8 +1834,8 @@
                 prob = prob[0]
         except:
             pass
-        print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
-        print
+        print('Test statistic = ', round(stat, 3), ' p = ', round(prob, 3), suffix)
+        print()
     else:
         file = open(fname,writemode)
         file.write('\n'+statname+'\n\n')
@@ -2417,7 +2418,7 @@
     denom = N.power(amoment(a,2,dimension),1.5)
     zero = N.equal(denom,0)
     if type(denom) == N.ndarray and asum(zero) <> 0:
-        print "Number of zeros in askew: ",asum(zero)
+        print("Number of zeros in askew: ", asum(zero))
     denom = denom + zero  # prevent divide-by-zero
     return N.where(zero, 0, amoment(a,3,dimension)/denom)

@@ -2436,7 +2437,7 @@
     denom = N.power(amoment(a,2,dimension),2)
     zero = N.equal(denom,0)
     if type(denom) == N.ndarray and asum(zero) <> 0:
-        print "Number of zeros in akurtosis: ",asum(zero)
+        print("Number of zeros in akurtosis: ", asum(zero))
     denom = denom + zero  # prevent divide-by-zero
     return N.where(zero,0,amoment(a,4,dimension)/denom)

@@ -2506,7 +2507,7 @@
         dimension = 0
     n = float(a.shape[dimension])
     if n<20:
-        print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
+        print("akurtosistest only valid for n>=20 ... continuing anyway, n=", n)
     b2 = akurtosis(a,dimension)
     E = 3.0*(n-1) /(n+1)
     varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))
@@ -2629,7 +2630,7 @@
         except:  # point outside lower/upper limits
             extrapoints = extrapoints + 1
     if (extrapoints > 0 and printextras == 1):
-        print '\nPoints outside given histogram range =',extrapoints
+        print('\nPoints outside given histogram range =', extrapoints)
     return (bins, lowerreallimit, binsize, extrapoints)


@@ -3001,11 +3002,11 @@
 """
     samples = ''
     while samples not in ['i','r','I','R','c','C']:
-        print '\nIndependent or related samples, or correlation (i,r,c): ',
+        print('\nIndependent or related samples, or correlation (i,r,c): ', end=' ')
         samples = raw_input()

     if samples in ['i','I','r','R']:
-        print '\nComparing variances ...',
+        print('\nComparing variances ...', end=' ')
         # USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
         r = obrientransform(x,y)
         f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
@@ -3013,45 +3014,45 @@
             vartype='unequal, p='+str(round(p,4))
         else:
             vartype='equal'
-        print vartype
+        print(vartype)
         if samples in ['i','I']:
             if vartype[0]=='e':
                 t,p = ttest_ind(x,y,None,0)
-                print '\nIndependent samples t-test: ', round(t,4),round(p,4)
+                print('\nIndependent samples t-test: ', round(t, 4), round(p, 4))
             else:
                 if len(x)>20 or len(y)>20:
                     z,p = ranksums(x,y)
-                    print '\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)
+                    print('\nRank Sums test (NONparametric, n>20): ', round(z, 4), round(p, 4))
                 else:
                     u,p = mannwhitneyu(x,y)
-                    print '\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)
+                    print('\nMann-Whitney U-test (NONparametric, ns<20): ', round(u, 4), round(p, 4))
         else:  # RELATED SAMPLES
             if vartype[0]=='e':
                 t,p = ttest_rel(x,y,0)
-                print '\nRelated samples t-test: ', round(t,4),round(p,4)
+                print('\nRelated samples t-test: ', round(t, 4), round(p, 4))
             else:
                 t,p = ranksums(x,y)
-                print '\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)
+                print('\nWilcoxon T-test (NONparametric): ', round(t, 4), round(p, 4))
     else:  # CORRELATION ANALYSIS
         corrtype = ''
         while corrtype not in ['c','C','r','R','d','D']:
-            print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
+            print('\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ', end=' ')
             corrtype = raw_input()
         if corrtype in ['c','C']:
             m,b,r,p,see = linregress(x,y)
-            print '\nLinear regression for continuous variables ...'
+            print('\nLinear regression for continuous variables ...')
             lol = [['Slope','Intercept','r','Prob','SEestimate'],
                    [round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
             pstat.printcc(lol)
         elif corrtype in ['r','R']:
             r,p = spearmanr(x,y)
-            print '\nCorrelation for ranked variables ...'
-            print "Spearman's r: ",round(r,4),round(p,4)
+            print('\nCorrelation for ranked variables ...')
+            print("Spearman's r: ", round(r, 4), round(p, 4))
         else:  # DICHOTOMOUS
             r,p = pointbiserialr(x,y)
-            print '\nAssuming x contains a dichotomous variable ...'
-            print 'Point Biserial r: ',round(r,4),round(p,4)
-    print '\n\n'
+            print('\nAssuming x contains a dichotomous variable ...')
+            print('Point Biserial r: ', round(r, 4), round(p, 4))
+    print('\n\n')
     return None


@@ -3284,7 +3285,7 @@
         shp = N.ones(len(y.shape))
         shp[0] = len(x)
         x.shape = shp
-    print x.shape, y.shape
+    print(x.shape, y.shape)
     r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
     r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
     zerodivproblem = N.equal(r_den,0)
@@ -3404,15 +3405,15 @@
     pval = abs(pval)
     t = N.ones(pval.shape,N.float_)*50
     step = N.ones(pval.shape,N.float_)*25
-    print "Initial ap2t() prob calc"
+    print("Initial ap2t() prob calc")
     prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
-    print 'ap2t() iter: ',
+    print('ap2t() iter: ', end=' ')
     for i in range(10):
-        print i,' ',
+        print(i, ' ', end=' ')
         t = N.where(pval<prob,t+step,t-step)
         prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
         step = step/2
-    print
+    print()
     # since this is an ugly hack, we get ugly boundaries
     t = N.where(t>99.9,1000,t)  # hit upper-boundary
     t = t+signs
@@ -3935,7 +3936,7 @@
         mask = N.clip(mask+newmask,0,1)
     noconverge = asum(N.equal(frozen,-1))
     if noconverge <> 0 and verbose:
-        print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
+        print('a or b too big, or ITMAX too small in Betacf for ', noconverge, ' elements')
     if arrayflag:
         return frozen
     else:
@@ -4020,7 +4021,7 @@
 Returns: statistic, p-value ???
 """
     if len(para) <> len(data):
-        print "data and para must be same length in aglm"
+        print("data and para must be same length in aglm")
         return
     n = len(para)
     p = pstat.aunique(para)
Index: lnt/trunk/lnt/lnttool/create.py
===================================================================
--- lnt/trunk/lnt/lnttool/create.py
+++ lnt/trunk/lnt/lnttool/create.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import click
 import platform

@@ -164,19 +165,19 @@
     # Execute an upgrade on the database to initialize the schema.
     lnt.server.db.migrate.update_path(db_path)

-    print 'created LNT configuration in %r' % basepath
-    print '  configuration file: %s' % cfg_path
-    print '  WSGI app          : %s' % wsgi_path
-    print '  database file     : %s' % db_path
-    print '  temporary dir     : %s' % tmp_path
-    print '  host URL          : %s' % hosturl
-    print
-    print 'You can execute:'
-    print '  %s' % wsgi_path
-    print 'to test your installation with the builtin server.'
-    print
-    print 'For production use configure this application to run with any'
-    print 'WSGI capable web server. You may need to modify the permissions'
-    print 'on the database and temporary file directory to allow writing'
-    print 'by the web app.'
-    print
+    print('created LNT configuration in %r' % basepath)
+    print('  configuration file: %s' % cfg_path)
+    print('  WSGI app          : %s' % wsgi_path)
+    print('  database file     : %s' % db_path)
+    print('  temporary dir     : %s' % tmp_path)
+    print('  host URL          : %s' % hosturl)
+    print()
+    print('You can execute:')
+    print('  %s' % wsgi_path)
+    print('to test your installation with the builtin server.')
+    print()
+    print('For production use configure this application to run with any')
+    print('WSGI capable web server. You may need to modify the permissions')
+    print('on the database and temporary file directory to allow writing')
+    print('by the web app.')
+    print()
Index: lnt/trunk/lnt/lnttool/main.py
===================================================================
--- lnt/trunk/lnt/lnttool/main.py
+++ lnt/trunk/lnt/lnttool/main.py
@@ -1,4 +1,5 @@
 """Implement the command line 'lnt' tool."""
+from __future__ import print_function
 from .common import init_logger
 from .common import submit_options
 from .convert import action_convert
@@ -134,11 +135,11 @@
     result_url = results.get('result_url')
     if result_url is not None:
         if verbose:
-            print "Results available at:", result_url
+            print("Results available at:", result_url)
         else:
-            print result_url
+            print(result_url)
     elif verbose:
-        print "Results available at: no URL available"
+        print("Results available at: no URL available")


 class RunTestCLI(click.MultiCommand):
@@ -167,13 +168,13 @@
     import lnt.tests
     import inspect

-    print 'Available tests:'
+    print('Available tests:')
     test_names = lnt.tests.get_names()
     max_name = max(map(len, test_names))
     for name in test_names:
         test_module = lnt.tests.get_module(name)
         description = inspect.cleandoc(test_module.__doc__)
-        print '  %-*s - %s' % (max_name, name, description)
+        print('  %-*s - %s' % (max_name, name, description))


 @click.command("submit")
@@ -409,7 +410,7 @@
 def command_get_version(input):
     """print the version of a profile"""
     import lnt.testing.profile.profile as profile
-    print profile.Profile.fromFile(input).getVersion()
+    print(profile.Profile.fromFile(input).getVersion())


 @action_profile.command("getTopLevelCounters")
@@ -418,7 +419,7 @@
     """print the whole-profile counter values"""
     import json
     import lnt.testing.profile.profile as profile
-    print json.dumps(profile.Profile.fromFile(input).getTopLevelCounters())
+    print(json.dumps(profile.Profile.fromFile(input).getTopLevelCounters()))


 @action_profile.command("getFunctions")
@@ -427,7 +428,7 @@
     """print the functions in a profile"""
     import json
     import lnt.testing.profile.profile as profile
-    print json.dumps(profile.Profile.fromFile(input).getFunctions())
+    print(json.dumps(profile.Profile.fromFile(input).getFunctions()))


 @action_profile.command("getCodeForFunction")
@@ -437,8 +438,8 @@
     """print the code/instruction for a function"""
     import json
     import lnt.testing.profile.profile as profile
-    print json.dumps(
-        list(profile.Profile.fromFile(input).getCodeForFunction(fn)))
+    print(json.dumps(
+        list(profile.Profile.fromFile(input).getCodeForFunction(fn))))


 def _version_check():
@@ -472,7 +473,7 @@
     if not value or ctx.resilient_parsing:
         return
     if lnt.__version__:
-        print "LNT %s" % (lnt.__version__,)
+        print("LNT %s" % (lnt.__version__, ))
     ctx.exit()


Index: lnt/trunk/lnt/server/ui/app.py
===================================================================
--- lnt/trunk/lnt/server/ui/app.py
+++ lnt/trunk/lnt/server/ui/app.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import StringIO
 import logging
 import logging.handlers
@@ -262,12 +263,12 @@
                 rotating.setLevel(logging.DEBUG)
                 self.logger.addHandler(rotating)
             except (OSError, IOError) as e:
-                print >> sys.stderr, "Error making log file", \
-                    LOG_FILENAME, str(e)
-                print >> sys.stderr, "Will not log to file."
+                print("Error making log file",
+                      LOG_FILENAME, str(e), file=sys.stderr)
+                print("Will not log to file.", file=sys.stderr)
             else:
                 self.logger.info("Started file logging.")
-                print "Logging to :", LOG_FILENAME
+                print("Logging to :", LOG_FILENAME)
         else:
             self.config['log_file_name'] = log_file_name

Index: lnt/trunk/lnt/tests/builtintest.py
===================================================================
--- lnt/trunk/lnt/tests/builtintest.py
+++ lnt/trunk/lnt/tests/builtintest.py
@@ -2,6 +2,7 @@
 Base class for builtin-in tests.
 """

+from __future__ import print_function
 import sys
 import os

@@ -43,7 +44,7 @@
     def log(self, message, ts=None):
         if not ts:
             ts = timestamp()
-        print >>sys.stderr, '%s: %s' % (ts, message)
+        print('%s: %s' % (ts, message), file=sys.stderr)

     @staticmethod
     def print_report(report, output):
@@ -52,7 +53,7 @@
             output_stream = sys.stdout
         else:
             output_stream = open(output, 'w')
-        print >> output_stream, report.render()
+        print(report.render(), file=output_stream)
         if output_stream is not sys.stdout:
             output_stream.close()
@@ -87,4 +88,4 @@
         """Print the result URL"""
         result_url = server_results.get('result_url', None)
         if result_url is not None:
-            print "Results available at:", server_results['result_url']
+            print("Results available at:", server_results['result_url'])
Index: lnt/trunk/lnt/tests/compile.py
===================================================================
--- lnt/trunk/lnt/tests/compile.py
+++ lnt/trunk/lnt/tests/compile.py
@@ -1,4 +1,5 @@
 """Single file compile-time performance testing"""
+from __future__ import print_function
 import errno
 import hashlib
 import json
@@ -759,8 +760,8 @@
     # Set up the sandbox.
     global g_output_dir
     if not os.path.exists(opts.sandbox_path):
-        print >>sys.stderr, "%s: creating sandbox: %r" % (
-            timestamp(), opts.sandbox_path)
+        print("%s: creating sandbox: %r" % (
+            timestamp(), opts.sandbox_path), file=sys.stderr)
         os.mkdir(opts.sandbox_path)
     if opts.timestamp_build:
         fmt_timestamp = timestamp().replace(' ', '_').replace(':', '-')
@@ -888,10 +889,10 @@

     # Show the tests, if requested.
     if opts.show_tests:
-        print >>sys.stderr, 'Available Tests'
+        print('Available Tests', file=sys.stderr)
         for name in sorted(set(name for name, _ in all_tests)):
-            print >>sys.stderr, '  %s' % (name,)
-        print
+            print('  %s' % (name, ), file=sys.stderr)
+        print()
         raise SystemExit

     # Find the tests to run.
Index: lnt/trunk/lnt/tests/nt.py
===================================================================
--- lnt/trunk/lnt/tests/nt.py
+++ lnt/trunk/lnt/tests/nt.py
@@ -1,4 +1,5 @@
 """LLVM test-suite compile and execution tests"""
+from __future__ import print_function
 import csv
 import os
 import platform
@@ -64,8 +65,8 @@
         cmdstr = ' '.join(args)

         if 'cwd' in kwargs:
-            print >>self._log, "# In working dir: " + kwargs['cwd']
-        print >>self.log, cmdstr
+            print("# In working dir: " + kwargs['cwd'], file=self._log)
+        print(cmdstr, file=self.log)
         self._log.flush()

         p = subprocess.Popen(args, stdout=self._log, stderr=self._log,
@@ -530,7 +531,7 @@
     # parallel build options to the test.
     test_modules.sort()

-    print >>sys.stderr, '%s: executing test modules' % (timestamp(),)
+    print('%s: executing test modules' % (timestamp(), ), file=sys.stderr)
     results = []
     for name in test_modules:
         # First, load the test module file.
@@ -655,10 +656,10 @@
     if config.use_isolation:
         # Write out the sandbox profile.
         sandbox_profile_path = os.path.join(basedir, "isolation.sb")
-        print >>sys.stderr, "%s: creating sandbox profile %r" % (
-            timestamp(), sandbox_profile_path)
+        print("%s: creating sandbox profile %r" % (
+            timestamp(), sandbox_profile_path), file=sys.stderr)
         with open(sandbox_profile_path, 'w') as f:
-            print >>f, """
+            print("""
 ;; Sandbox profile for isolation test access.
 (version 1)

@@ -678,44 +679,44 @@
        (regex #"^/private/tmp/")
        (regex #"^/private/var/folders/")
        (regex #"^/dev/")
-       (regex #"^%s"))""" % (basedir,)
+       (regex #"^%s"))""" % (basedir, ), file=f)
         common_args = ['sandbox-exec', '-f', sandbox_profile_path] +\
             common_args

     # Run a separate 'make build' step if --build-threads was given.
     if config.build_threads > 0:
         args = common_args + ['-j', str(config.build_threads), 'build']
-        print >>test_log, '%s: running: %s' % (timestamp(),
-                                               ' '.join('"%s"' % a
-                                                        for a in args))
+        print('%s: running: %s' % (timestamp(),
+                                   ' '.join('"%s"' % a
+                                            for a in args)), file=test_log)
         test_log.flush()

-        print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % (
-            timestamp(), config.build_threads)
+        print('%s: building "nightly tests" with -j%u...' % (
+            timestamp(), config.build_threads), file=sys.stderr)
         res = execute_command(test_log, basedir, args, report_dir)

         if res != 0:
-            print >> sys.stderr, "Failure while running make build! " \
-                "See log: %s" % test_log.name
+            print("Failure while running make build! "
+                  "See log: %s" % test_log.name, file=sys.stderr)

     # Then 'make report'.
     args = common_args + ['-j', str(config.threads),
                           'report', 'report.%s.csv' % config.test_style]
-    print >>test_log, '%s: running: %s' % (timestamp(),
-                                           ' '.join('"%s"' % a
-                                                    for a in args))
+    print('%s: running: %s' % (timestamp(),
+                               ' '.join('"%s"' % a
+                                        for a in args)), file=test_log)
     test_log.flush()

     # FIXME: We shouldn't need to set env=os.environ here, but if we don't
     # somehow MACOSX_DEPLOYMENT_TARGET gets injected into the environment on OS
     # X (which changes the driver behavior and causes generally weirdness).
-    print >>sys.stderr, '%s: executing "nightly tests" with -j%u...' % (
-        timestamp(), config.threads)
+    print('%s: executing "nightly tests" with -j%u...' % (
+        timestamp(), config.threads), file=sys.stderr)
     res = execute_command(test_log, basedir, args, report_dir)

     if res != 0:
-        print >> sys.stderr, "Failure while running nightly tests! "\
-            "See log: %s" % test_log.name
+        print("Failure while running nightly tests! "
+              "See log: %s" % test_log.name, file=sys.stderr)


 # Keep a mapping of mangled test names, to the original names in the
@@ -867,10 +868,10 @@
 def prepare_report_dir(config):
     # Set up the sandbox.
     sandbox_path = config.sandbox_path
-    print sandbox_path
+    print(sandbox_path)
     if not os.path.exists(sandbox_path):
-        print >>sys.stderr, "%s: creating sandbox: %r" % (
-            timestamp(), sandbox_path)
+        print("%s: creating sandbox: %r" % (
+            timestamp(), sandbox_path), file=sys.stderr)
         os.mkdir(sandbox_path)

     # Create the per-test directory.
@@ -908,15 +909,15 @@
 def update_tools(make_variables, config, iteration):
     """Update the test suite tools.
""" - print >>sys.stderr, '%s: building test-suite tools' % (timestamp(),) + print('%s: building test-suite tools' % (timestamp(), ), file=sys.stderr) args = ['make', 'tools'] args.extend('%s=%s' % (k, v) for k, v in make_variables.items()) build_tools_log_path = os.path.join(config.build_dir(iteration), 'build-tools.log') build_tools_log = open(build_tools_log_path, 'w') - print >>build_tools_log, '%s: running: %s' % (timestamp(), - ' '.join('"%s"' % a - for a in args)) + print('%s: running: %s' % (timestamp(), + ' '.join('"%s"' % a + for a in args)), file=build_tools_log) build_tools_log.flush() res = execute_command(build_tools_log, config.build_dir(iteration), args, config.report_dir) @@ -947,12 +948,12 @@ args.extend(['--target=%s' % config.target]) - print >>configure_log, '%s: running: %s' % (timestamp(), - ' '.join('"%s"' % a - for a in args)) + print('%s: running: %s' % (timestamp(), + ' '.join('"%s"' % a + for a in args)), file=configure_log) configure_log.flush() - print >>sys.stderr, '%s: configuring...' % timestamp() + print('%s: configuring...' % timestamp(), file=sys.stderr) res = execute_command(configure_log, basedir, args, config.report_dir) configure_log.close() if res != 0: @@ -968,15 +969,15 @@ obj_path = os.path.join(basedir, suffix) src_path = os.path.join(config.test_suite_root, suffix) if not os.path.exists(obj_path): - print '%s: initializing test dir %s' % (timestamp(), suffix) + print('%s: initializing test dir %s' % (timestamp(), suffix)) os.mkdir(obj_path) shutil.copyfile(os.path.join(src_path, 'Makefile'), os.path.join(obj_path, 'Makefile')) def run_test(nick_prefix, iteration, config): - print >>sys.stderr, "%s: checking source versions" % ( - timestamp(),) + print("%s: checking source versions" % ( + timestamp(), ), file=sys.stderr) test_suite_source_version = get_source_version(config.test_suite_root) @@ -989,11 +990,11 @@ config) # Scan for LNT-based test modules. - print >>sys.stderr, "%s: scanning for LNT-based test modules" % ( - timestamp(),) + print("%s: scanning for LNT-based test modules" % ( + timestamp(), ), file=sys.stderr) test_modules = list(scan_for_test_modules(config)) - print >>sys.stderr, "%s: found %d LNT-based test modules" % ( - timestamp(), len(test_modules)) + print("%s: found %d LNT-based test modules" % ( + timestamp(), len(test_modules)), file=sys.stderr) nick = nick_prefix if config.auto_name: @@ -1001,7 +1002,7 @@ cc_info = config.cc_info cc_nick = '%s_%s' % (cc_info.get('cc_name'), cc_info.get('cc_build')) nick += "__%s__%s" % (cc_nick, cc_info.get('cc_target').split('-')[0]) - print >>sys.stderr, "%s: using nickname: %r" % (timestamp(), nick) + print("%s: using nickname: %r" % (timestamp(), nick), file=sys.stderr) basedir = prepare_build_dir(config, iteration) @@ -1009,7 +1010,7 @@ # cause make horrible fits). start_time = timestamp() - print >>sys.stderr, '%s: starting test in %r' % (start_time, basedir) + print('%s: starting test in %r' % (start_time, basedir), file=sys.stderr) # Configure the test suite. if config.run_configure or not os.path.exists(os.path.join( @@ -1054,9 +1055,9 @@ else: test_namespace = 'nightlytest' if run_nightly_test: - print >>sys.stderr, '%s: loading nightly test data...' % timestamp() + print('%s: loading nightly test data...' % timestamp(), file=sys.stderr) # If nightly test went screwy, it won't have produced a report. 
-        print build_report_path
+        print(build_report_path)
         if not os.path.exists(build_report_path):
             fatal('nightly test failed, no report generated')
@@ -1076,7 +1077,7 @@
             existing_tests.add(s.name)
         test_samples.extend(results)

-    print >>sys.stderr, '%s: capturing machine information' % (timestamp(),)
+    print('%s: capturing machine information' % (timestamp(), ), file=sys.stderr)
     # Collect the machine and run info.
     #
     # FIXME: Import full range of data that the Clang tests are using?
@@ -1154,19 +1155,19 @@
             if name in target:
                 logger.warning("parameter %r overwrote existing value: %r" %
                                (name, target.get(name)))
-            print target, name, value
+            print(target, name, value)
             target[name] = value

     # Generate the test report.
     lnt_report_path = config.report_path(iteration)
-    print >>sys.stderr, '%s: generating report: %r' % (timestamp(),
-                                                       lnt_report_path)
+    print('%s: generating report: %r' % (timestamp(),
+                                         lnt_report_path), file=sys.stderr)
     machine = lnt.testing.Machine(nick, machine_info)
     run = lnt.testing.Run(start_time, end_time, info=run_info)
     report = lnt.testing.Report(machine, run, test_samples)
     lnt_report_file = open(lnt_report_path, 'w')
-    print >>lnt_report_file, report.render()
+    print(report.render(), file=lnt_report_file)
     lnt_report_file.close()

     return report

@@ -1236,7 +1237,7 @@
     assert len(to_go) >= 1, "Missing at least one accounting file."

     for path in to_go:
-        print "Removing:", path
+        print("Removing:", path)
         os.remove(path)


@@ -1706,16 +1707,16 @@

         reports = []
         for i in range(opts.multisample):
-            print >>sys.stderr, "%s: (multisample) running iteration %d" %\
-                (timestamp(), i)
+            print("%s: (multisample) running iteration %d" %
+                  (timestamp(), i), file=sys.stderr)
             report = run_test(opts.label, i, config)
             reports.append(report)

         # Create the merged report.
         #
         # FIXME: Do a more robust job of merging the reports?
- print >>sys.stderr, "%s: (multisample) creating merged report" % ( - timestamp(),) + print("%s: (multisample) creating merged report" % ( + timestamp(), ), file=sys.stderr) machine = reports[0].machine run = reports[0].run run.end_time = reports[-1].run.end_time @@ -1726,7 +1727,7 @@ lnt_report_path = config.report_path(None) report = lnt.testing.Report(machine, run, test_samples) lnt_report_file = open(lnt_report_path, 'w') - print >>lnt_report_file, report.render() + print(report.render(), file=lnt_report_file) lnt_report_file.close() else: @@ -1742,7 +1743,7 @@ lnt_report_path = config.report_path(None) lnt_report_file = open(lnt_report_path, 'w') - print >>lnt_report_file, test_results.render() + print(test_results.render(), file=lnt_report_file) lnt_report_file.close() merge_run = 'replace' Index: lnt/trunk/lnt/tests/test_suite.py =================================================================== --- lnt/trunk/lnt/tests/test_suite.py +++ lnt/trunk/lnt/tests/test_suite.py @@ -1,4 +1,5 @@ """LLVM test-suite""" +from __future__ import print_function import subprocess import tempfile import json @@ -971,7 +972,7 @@ "iprofiler -timeprofiler -I 40u") cmd_iprofiler = cmd + ['-DTEST_SUITE_RUN_UNDER=' + iprofiler] - print ' '.join(cmd_iprofiler) + print(' '.join(cmd_iprofiler)) out = subprocess.check_output(cmd_iprofiler) Index: lnt/trunk/lnt/util/ImportData.py =================================================================== --- lnt/trunk/lnt/util/ImportData.py +++ lnt/trunk/lnt/util/ImportData.py @@ -1,3 +1,4 @@ +from __future__ import print_function from lnt.util import NTEmailReport from contextlib import closing from lnt.util import logger @@ -200,18 +201,18 @@ # Print the generic import information. if 'import_file' in result: - print >>out, "Importing %r" % os.path.basename(result['import_file']) + print("Importing %r" % os.path.basename(result['import_file']), file=out) if result['success']: - print >>out, "Import succeeded." - print >>out + print("Import succeeded.", file=out) + print(file=out) else: out.flush() - print >>err, "Import Failed:" - print >>err, "%s\n" % result['error'] + print("Import Failed:", file=err) + print("%s\n" % result['error'], file=err) message = result.get('message', None) if message: - print >>err, "%s\n" % message - print >>err, "--------------" + print("%s\n" % message, file=err) + print("--------------", file=err) err.flush() return @@ -223,15 +224,15 @@ # List the parameter sets, if interesting. 
show_pset = len(test_results) > 1 if show_pset: - print >>out, "Parameter Sets" - print >>out, "--------------" + print("Parameter Sets", file=out) + print("--------------", file=out) for i, info in enumerate(test_results): - print >>out, "P%d: %s" % (i, info['pset']) - print >>out + print("P%d: %s" % (i, info['pset']), file=out) + print(file=out) total_num_tests = sum([len(item['results']) for item in test_results]) - print >>out, "--- Tested: %d tests --" % total_num_tests + print("--- Tested: %d tests --" % total_num_tests, file=out) test_index = 0 result_kinds = collections.Counter() for i, item in enumerate(test_results): @@ -270,51 +271,51 @@ if show_pset: name = 'P%d :: %s' % (i, name) - print >>out, "%s: %s (%d of %d)" % (result_string, name, - test_index, total_num_tests) + print("%s: %s (%d of %d)" % (result_string, name, + test_index, total_num_tests), file=out) if result_info: - print >>out, "%s TEST '%s' %s" % ('*'*20, name, '*'*20) - print >>out, result_info - print >>out, "*" * 20 + print("%s TEST '%s' %s" % ('*'*20, name, '*'*20), file=out) + print(result_info, file=out) + print("*" * 20, file=out) if 'original_run' in result: - print >>out, ("This submission is a duplicate of run %d, " - "already in the database.") % result['original_run'] - print >>out + print(("This submission is a duplicate of run %d, " + "already in the database.") % result['original_run'], file=out) + print(file=out) if result['report_to_address']: - print >>out, "Report emailed to: %r" % result['report_to_address'] - print >>out + print("Report emailed to: %r" % result['report_to_address'], file=out) + print(file=out) # Print the processing times. - print >>out, "Processing Times" - print >>out, "----------------" - print >>out, "Load : %.2fs" % result['load_time'] - print >>out, "Import : %.2fs" % result['import_time'] - print >>out, "Report : %.2fs" % result['report_time'] - print >>out, "Total : %.2fs" % result['total_time'] - print >>out + print("Processing Times", file=out) + print("----------------", file=out) + print("Load : %.2fs" % result['load_time'], file=out) + print("Import : %.2fs" % result['import_time'], file=out) + print("Report : %.2fs" % result['report_time'], file=out) + print("Total : %.2fs" % result['total_time'], file=out) + print(file=out) # Print the added database items. 
     total_added = (result['added_machines'] + result['added_runs'] +
                    result['added_tests'] + result.get('added_samples', 0))
     if total_added:
-        print >>out, "Imported Data"
-        print >>out, "-------------"
+        print("Imported Data", file=out)
+        print("-------------", file=out)
         if result['added_machines']:
-            print >>out, "Added Machines: %d" % result['added_machines']
+            print("Added Machines: %d" % result['added_machines'], file=out)
         if result['added_runs']:
-            print >>out, "Added Runs    : %d" % result['added_runs']
+            print("Added Runs    : %d" % result['added_runs'], file=out)
         if result['added_tests']:
-            print >>out, "Added Tests   : %d" % result['added_tests']
+            print("Added Tests   : %d" % result['added_tests'], file=out)
         if result.get('added_samples', 0):
-            print >>out, "Added Samples : %d" % result['added_samples']
-        print >>out
-    print >>out, "Results"
-    print >>out, "----------------"
+            print("Added Samples : %d" % result['added_samples'], file=out)
+        print(file=out)
+    print("Results", file=out)
+    print("----------------", file=out)
     for kind, count in result_kinds.items():
-        print >>out, kind, ":", count
+        print(kind, ":", count, file=out)


 def import_from_string(config, db_name, db, session, ts_name, data,
Index: lnt/trunk/lnt/util/ServerUtil.py
===================================================================
--- lnt/trunk/lnt/util/ServerUtil.py
+++ lnt/trunk/lnt/util/ServerUtil.py
@@ -1,6 +1,7 @@
 """
 Utility for submitting files to a web server over HTTP.
 """
+from __future__ import print_function
 import sys
 import urllib
 import urllib2
@@ -19,7 +20,7 @@
     try:
         error = json.loads(reply)
     except ValueError:
-        print "error: {}".format(reply)
+        print("error: {}".format(reply))
         return
     sys.stderr.write("error: lnt server: {}\n".format(error.get('error')))
     message = error.get('message', '')
@@ -55,13 +56,13 @@
         return json.loads(result_data)
     except Exception:
         import traceback
-        print "Unable to load result, not a valid JSON object."
-        print
-        print "Traceback:"
+        print("Unable to load result, not a valid JSON object.")
+        print()
+        print("Traceback:")
         traceback.print_exc()
-        print
-        print "Result:"
-        print "error:", result_data
+        print()
+        print("Result:")
+        print("error:", result_data)
         return

Index: lnt/trunk/lnt/util/wsgi_restart.py
===================================================================
--- lnt/trunk/lnt/util/wsgi_restart.py
+++ lnt/trunk/lnt/util/wsgi_restart.py
@@ -1,4 +1,5 @@
 # This code lifted from the mod_wsgi docs.
+from __future__ import print_function
 import os
 import sys
 import signal
@@ -18,8 +19,8 @@
 def _restart(path):
     _queue.put(True)
     prefix = 'monitor (pid=%d):' % os.getpid()
-    print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
-    print >> sys.stderr, '%s Triggering process restart.' % prefix
+    print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
+    print('%s Triggering process restart.' % prefix, file=sys.stderr)
    os.kill(os.getpid(), signal.SIGINT)


@@ -116,6 +117,6 @@
     _lock.acquire()
     if not _running:
         prefix = 'monitor (pid=%d):' % os.getpid()
-        print >> sys.stderr, '%s Starting change monitor.' % prefix
+        print('%s Starting change monitor.' % prefix, file=sys.stderr)
         _running = True
         _thread.start()
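Note on the conversion pattern: every hunk in this patch applies one of the same four mechanical rewrites, and each touched module gains "from __future__ import print_function" so the new spelling behaves identically under Python 2 and Python 3. A minimal sketch of the four rewrites follows; the "log" stream name here is a hypothetical stand-in for the patch's various streams (self._log, test_log, out, err, etc.):

    from __future__ import print_function
    import sys

    log = sys.stderr  # hypothetical stand-in for the streams used in the patch

    # 1. The statement form becomes a function call:
    #        print 'hello'            ->  print('hello')
    print('hello')

    # 2. A trailing comma (which suppressed the newline) becomes end=' ':
    #        print 'prompt: ',        ->  print('prompt: ', end=' ')
    print('prompt: ', end=' ')

    # 3. Stream redirection with >> becomes the file= keyword argument:
    #        print >>log, 'message'   ->  print('message', file=log)
    print('message', file=log)

    # 4. A bare print (emit a blank line) becomes an empty call:
    #        print                    ->  print()
    print()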