Index: lnt/tests/nt.py =================================================================== --- lnt/tests/nt.py +++ lnt/tests/nt.py @@ -31,1200 +31,7 @@ from lnt.util import ImportData import builtintest - -class TestModule(object): - """ - Base class for extension test modules. - """ - - def __init__(self): - self._log = None - - def main(self): - raise NotImplementedError - - def execute_test(self, options): - raise RuntimeError("Abstract Method.") - - def _execute_test(self, test_log, options): - self._log = test_log - try: - return self.execute_test(options) - finally: - self._log = None - - @property - def log(self): - """Get the test log output stream.""" - if self._log is None: - raise ValueError("log() unavailable outside test execution") - return self._log - - -class TestConfiguration(object): - """Store and calculate important paths and options for this test based - on the command line arguments. This object is stateless and only - based on the command line arguments! Options which take a long - time to calculate are cached, since we are stateless this is okay. - - """ - - def __init__(self, opts, start_time): - """Prepare the configuration: - opts -- the command line options object - start_time -- the time the program was invoked as a string - """ - assert type(opts) == dict, "Options must be a dict." - self.opts = opts - self.__dict__.update(opts) - self.start_time = start_time - - # Report directory cache. - self._report_dir = None - # Compiler interrogation is a lot of work, this will cache it. - self._cc_info = None - # Getting compiler version spawns subprocesses, cache it. - self._get_source_version = None - self.rerun_test = None - - @property - def report_dir(self): - """Get the (possibly cached) path to the directory where test suite - will be placed. Report dir is a directory within the sandbox which - is either "build" or a timestamped directory based on """ - if self._report_dir is not None: - return self._report_dir - - if self.timestamp_build: - ts = self.start_time.replace(' ', '_').replace(':', '-') - build_dir_name = "test-%s" % ts - else: - build_dir_name = "build" - basedir = os.path.join(self.sandbox_path, build_dir_name) - # Canonicalize paths, in case we are using e.g. an NFS remote mount. - # - # FIXME: This should be eliminated, along with the realpath call below. - basedir = os.path.realpath(basedir) - self._report_dir = basedir - return basedir - - def report_path(self, iteration): - """Path to a single run's JSON results file.""" - return os.path.join(self.build_dir(iteration), 'report.json') - - def build_dir(self, iteration): - """Path of the build dir within the report dir. iteration -- the - iteration number if multisample otherwise None. - When multisample is off report_dir == build_dir. - """ - # Do nothing in single-sample build, because report_dir and the - # build_dir is the same directory. - if iteration is None: - return self.report_dir - - # Create the directory for individual iteration. - return os.path.join(self.report_dir, "sample-%d" % iteration) - - @property - def target_flags(self): - """Computed target flags list.""" - # Compute TARGET_FLAGS. - target_flags = [] - - # FIXME: Eliminate this blanket option. - target_flags.extend(self.cflags) - - if self.cflag_string: - # FIXME: This isn't generally OK on Windows :/ - target_flags.extend(_unix_quote_args(self.cflag_string)) - - # Pass flags to backend. 
- for f in self.mllvm: - target_flags.extend(['-mllvm', f]) - - if self.arch is not None: - target_flags.append('-arch') - target_flags.append(self.arch) - if self.isysroot is not None: - target_flags.append('-isysroot') - target_flags.append(self.isysroot) - return target_flags - - @property - def cc_info(self): - """Discovered compiler information from the cc under test. Cached - because discovery is slow. - - """ - if self._cc_info is None: - self._cc_info = lnt.testing.util.compilers.get_cc_info( - self.cc_under_test, - self.target_flags) - return self._cc_info - - @property - def target(self): - """Discovered compiler's target information.""" - # Get compiler info. - cc_target = self.cc_info.get('cc_target') - return cc_target - - @property - def llvm_source_version(self): - """The version of llvm from llvm_src_root.""" - if self.llvm_src_root: - if self._get_source_version is None: - self._get_source_version = get_source_version( - self.llvm_src_root) - return self._get_source_version - else: - return None - - @property - def qemu_user_mode_command(self): - """ The command used for qemu user mode """ - assert self.qemu_user_mode - qemu_cmd_line = [self.qemu_user_mode] + self.qemu_flags - if self.qemu_string: - qemu_cmd_line += _unix_quote_args(self.qemu_string) - return ' '.join(qemu_cmd_line) - - @property - def generate_report_script(self): - """ The path to the report generation script. """ - return os.path.join(self.test_suite_root, "GenerateReport.pl") - - def build_report_path(self, iteration): - """The path of the results.csv file which each run of the test suite - will produce. - iteration -- the multisample iteration number otherwise None.""" - report_path = os.path.join(self.build_dir(iteration)) - if self.only_test is not None: - report_path = os.path.join(report_path, self.only_test) - report_path = os.path.join(report_path, 'report.%s.csv' % - self.test_style) - return report_path - - def test_log_path(self, iteration): - """The path of the log file for the build. - iteration -- the multisample iteration number otherwise None.""" - return os.path.join(self.build_dir(iteration), 'test.log') - - def compute_run_make_variables(self): - """Compute make variables from command line arguments and compiler. - Returns a dict of make_variables as well as a public version - with the remote options removed. - - """ - cc_info = self.cc_info - # Set the make variables to use. - make_variables = { - 'TARGET_CC': self.cc_reference, - 'TARGET_CXX': self.cxx_reference, - 'TARGET_LLVMGCC': self.cc_under_test, - 'TARGET_LLVMGXX': self.cxx_under_test, - 'TARGET_FLAGS': ' '.join(self.target_flags), - } - - # Compute TARGET_LLCFLAGS, for TEST=nightly runs. - if self.test_style == "nightly": - # Compute TARGET_LLCFLAGS. - target_llcflags = [] - if self.mcpu is not None: - target_llcflags.append('-mcpu') - target_llcflags.append(self.mcpu) - if self.relocation_model is not None: - target_llcflags.append('-relocation-model') - target_llcflags.append(self.relocation_model) - if self.disable_fp_elim: - target_llcflags.append('-disable-fp-elim') - make_variables['TARGET_LLCFLAGS'] = ' '.join(target_llcflags) - - # Set up environment overrides if requested, to effectively - # run under the specified Darwin iOS simulator. - # - # See /D/P/../Developer/Tools/RunPlatformUnitTests. 
- if self.ios_simulator_sdk is not None: - make_variables['EXECUTION_ENVIRONMENT_OVERRIDES'] = ' '.join( - ['DYLD_FRAMEWORK_PATH="%s"' % self.ios_simulator_sdk, - 'DYLD_LIBRARY_PATH=""', - 'DYLD_ROOT_PATH="%s"' % self.ios_simulator_sdk, - 'DYLD_NEW_LOCAL_SHARED_REGIONS=YES', - 'DYLD_NO_FIX_PREBINDING=YES', - 'IPHONE_SIMULATOR_ROOT="%s"' % self.ios_simulator_sdk, - 'CFFIXED_USER_HOME="%s"' % os.path.expanduser( - "~/Library/Application Support/iPhone Simulator/User")]) - - # Pick apart the build mode. - build_mode = self.build_mode - if build_mode.startswith("Debug"): - build_mode = build_mode[len("Debug"):] - make_variables['ENABLE_OPTIMIZED'] = '0' - elif build_mode.startswith("Unoptimized"): - build_mode = build_mode[len("Unoptimized"):] - make_variables['ENABLE_OPTIMIZED'] = '0' - elif build_mode.startswith("Release"): - build_mode = build_mode[len("Release"):] - make_variables['ENABLE_OPTIMIZED'] = '1' - else: - fatal('invalid build mode: %r' % self.build_mode) - - while build_mode: - for (name, key) in (('+Asserts', 'ENABLE_ASSERTIONS'), - ('+Checks', 'ENABLE_EXPENSIVE_CHECKS'), - ('+Coverage', 'ENABLE_COVERAGE'), - ('+Debug', 'DEBUG_SYMBOLS'), - ('+Profile', 'ENABLE_PROFILING')): - if build_mode.startswith(name): - build_mode = build_mode[len(name):] - make_variables[key] = '1' - break - else: - fatal('invalid build mode: %r' % self.build_mode) - - # Assertions are disabled by default. - if 'ENABLE_ASSERTIONS' in make_variables: - del make_variables['ENABLE_ASSERTIONS'] - else: - make_variables['DISABLE_ASSERTIONS'] = '1' - - # Set the optimization level options. - make_variables['OPTFLAGS'] = self.optimize_option - if self.optimize_option == '-Os': - make_variables['LLI_OPTFLAGS'] = '-O2' - make_variables['LLC_OPTFLAGS'] = '-O2' - else: - make_variables['LLI_OPTFLAGS'] = self.optimize_option - make_variables['LLC_OPTFLAGS'] = self.optimize_option - - # Set test selection variables. - if not self.test_cxx: - make_variables['DISABLE_CXX'] = '1' - if not self.test_jit: - make_variables['DISABLE_JIT'] = '1' - if not self.test_llc: - make_variables['DISABLE_LLC'] = '1' - if not self.test_lto: - make_variables['DISABLE_LTO'] = '1' - if self.test_llcbeta: - make_variables['ENABLE_LLCBETA'] = '1' - if self.test_small: - make_variables['SMALL_PROBLEM_SIZE'] = '1' - if self.test_large: - if self.test_small: - fatal('the --small and --large options are mutually exclusive') - make_variables['LARGE_PROBLEM_SIZE'] = '1' - if self.test_benchmarking_only: - make_variables['BENCHMARKING_ONLY'] = '1' - if self.test_integrated_as: - make_variables['TEST_INTEGRATED_AS'] = '1' - if self.liblto_path: - make_variables['LD_ENV_OVERRIDES'] = ( - 'env DYLD_LIBRARY_PATH=%s' % os.path.dirname( - self.liblto_path)) - - if self.threads > 1 or self.build_threads > 1: - make_variables['ENABLE_PARALLEL_REPORT'] = '1' - - # Select the test style to use. - if self.test_style == "simple": - # We always use reference outputs with TEST=simple. - make_variables['ENABLE_HASHED_PROGRAM_OUTPUT'] = '1' - make_variables['USE_REFERENCE_OUTPUT'] = '1' - make_variables['TEST'] = self.test_style - - # Set CC_UNDER_TEST_IS_CLANG when appropriate. 
- if cc_info.get('cc_name') in ('apple_clang', 'clang'): - make_variables['CC_UNDER_TEST_IS_CLANG'] = '1' - elif cc_info.get('cc_name') in ('llvm-gcc',): - make_variables['CC_UNDER_TEST_IS_LLVM_GCC'] = '1' - elif cc_info.get('cc_name') in ('gcc',): - make_variables['CC_UNDER_TEST_IS_GCC'] = '1' - - # Convert the target arch into a make variable, to allow more - # target based specialization (e.g., - # CC_UNDER_TEST_TARGET_IS_ARMV7). - if '-' in cc_info.get('cc_target', ''): - arch_name = cc_info.get('cc_target').split('-', 1)[0] - make_variables['CC_UNDER_TEST_TARGET_IS_' + arch_name.upper()] = '1' - - # Set LLVM_RELEASE_IS_PLUS_ASSERTS when appropriate, to allow - # testing older LLVM source trees. - llvm_source_version = self.llvm_source_version - if (llvm_source_version and llvm_source_version.isdigit() and - int(llvm_source_version) < 107758): - make_variables['LLVM_RELEASE_IS_PLUS_ASSERTS'] = 1 - - # Set ARCH appropriately, based on the inferred target. - # - # FIXME: We should probably be more strict about this. - cc_target = cc_info.get('cc_target') - llvm_arch = self.llvm_arch - if cc_target and llvm_arch is None: - # cc_target is expected to be a (GCC style) target - # triple. Pick out the arch component, and then try to - # convert it to an LLVM nightly test style architecture - # name, which is of course totally different from all of - # GCC names, triple names, LLVM target names, and LLVM - # triple names. Stupid world. - # - # FIXME: Clean this up once everyone is on 'lnt runtest - # nt' style nightly testing. - arch = cc_target.split('-', 1)[0].lower() - if (len(arch) == 4 and arch[0] == 'i' and arch.endswith('86') and - arch[1] in '3456789'): # i[3-9]86 - llvm_arch = 'x86' - elif arch in ('x86_64', 'amd64'): - llvm_arch = 'x86_64' - elif arch in ('powerpc', 'powerpc64', 'ppu'): - llvm_arch = 'PowerPC' - elif (arch == 'arm' or arch.startswith('armv') or - arch == 'thumb' or arch.startswith('thumbv') or - arch == 'xscale'): - llvm_arch = 'ARM' - elif arch in ('aarch64', 'arm64'): - llvm_arch = 'AArch64' - elif arch.startswith('alpha'): - llvm_arch = 'Alpha' - elif arch.startswith('sparc'): - llvm_arch = 'Sparc' - elif arch in ('mips', 'mipsel', 'mips64', 'mips64el'): - llvm_arch = 'Mips' - - if llvm_arch is not None: - make_variables['ARCH'] = llvm_arch - else: - warning("unable to infer ARCH, some tests may not run correctly!") - - # Add in any additional make flags passed in via --make-param. - for entry in self.make_parameters: - if '=' not in entry: - name, value = entry, '' - else: - name, value = entry.split('=', 1) - - make_variables[name] = value - - # Set remote execution variables, if used. - if self.remote: - # make a copy of args for report, without remote options. - public_vars = make_variables.copy() - make_variables['REMOTE_HOST'] = self.remote_host - make_variables['REMOTE_USER'] = self.remote_user - make_variables['REMOTE_PORT'] = str(self.remote_port) - make_variables['REMOTE_CLIENT'] = self.remote_client - else: - public_vars = make_variables - - # Set qemu user mode variables, if used. - if self.qemu_user_mode: - make_variables['USER_MODE_EMULATION'] = '1' - make_variables['RUNUNDER'] = self.qemu_user_mode_command - - # Set USE_PERF flag, if specified. 
- if self.use_perf: - make_variables['USE_PERF'] = '1' - - return make_variables, public_vars - -### - -def scan_for_test_modules(config): - base_modules_path = os.path.join(config.test_suite_root, 'LNTBased') - if config.only_test is None: - test_modules_path = base_modules_path - elif config.only_test.startswith('LNTBased'): - test_modules_path = os.path.join(config.test_suite_root, config.only_test) - else: - return - - # We follow links here because we want to support the ability for having - # various "suites" of LNTBased tests in separate repositories, and allowing - # users to just checkout them out elsewhere and link them into their LLVM - # test-suite source tree. - for dirpath,dirnames,filenames in os.walk(test_modules_path, - followlinks = True): - # Ignore the example tests, unless requested. - if not config.include_test_examples and 'Examples' in dirnames: - dirnames.remove('Examples') - - # Check if this directory defines a test module. - if 'TestModule' not in filenames: - continue - - # If so, don't traverse any lower. - del dirnames[:] - - # Add to the list of test modules. - assert dirpath.startswith(base_modules_path + '/') - yield dirpath[len(base_modules_path) + 1:] - -def execute_command(test_log, basedir, args, report_dir): - logfile = test_log - - if report_dir is not None: - logfile = subprocess.PIPE - # Open a duplicated logfile at the global dir. - _, logname = os.path.split(test_log.name) - global_log_path = os.path.join(report_dir, logname) - global_log = open(global_log_path, 'a+') - - p = subprocess.Popen(args=args, stdin=None, stdout=logfile, - stderr=subprocess.STDOUT, cwd=basedir, - env=os.environ) - - if report_dir is not None: - while p.poll() is None: - l = p.stdout.readline() - if len(l) > 0: - test_log.write(l) - global_log.write(l) - - global_log.close() - - return p.wait() - -# FIXME: Support duplicate logfiles to global directory. -def execute_test_modules(test_log, test_modules, test_module_variables, - basedir, config): - # For now, we don't execute these in parallel, but we do forward the - # parallel build options to the test. - test_modules.sort() - - print >>sys.stderr, '%s: executing test modules' % (timestamp(),) - results = [] - for name in test_modules: - # First, load the test module file. - locals = globals = {} - test_path = os.path.join(config.test_suite_root, 'LNTBased', name) - test_obj_path = os.path.join(basedir, 'LNTBased', name) - module_path = os.path.join(test_path, 'TestModule') - module_file = open(module_path) - try: - exec module_file in locals, globals - except: - info = traceback.format_exc() - fatal("unable to import test module: %r\n%s" % ( - module_path, info)) - - # Lookup and instantiate the test class. - test_class = globals.get('test_class') - if test_class is None: - fatal("no 'test_class' global in import test module: %r" % ( - module_path,)) - try: - test_instance = test_class() - except: - fatal("unable to instantiate test class for: %r" % module_path) - - if not isinstance(test_instance, TestModule): - fatal("invalid test class (expected lnt.tests.nt.TestModule " - "subclass) for: %r" % module_path) - - # Create the per test variables, and ensure the output directory exists. - variables = test_module_variables.copy() - variables['MODULENAME'] = name - variables['SRCROOT'] = test_path - variables['OBJROOT'] = test_obj_path - mkdir_p(test_obj_path) - - # Execute the tests. 
- try: - test_samples = test_instance._execute_test(test_log, variables) - except: - info = traceback.format_exc() - fatal("exception executing tests for: %r\n%s" % ( - module_path, info)) - - # Check that the test samples are in the expected format. - is_ok = True - try: - test_samples = list(test_samples) - for item in test_samples: - if not isinstance(item, lnt.testing.TestSamples): - is_ok = False - break - except: - is_ok = False - if not is_ok: - fatal("test module did not return samples list: %r" % ( - module_path,)) - - results.append((name, test_samples)) - - return results - -def compute_test_module_variables(make_variables, config): - # Set the test module options, which we try and restrict to a tighter subset - # than what we pass to the LNT makefiles. - test_module_variables = { - 'CC' : make_variables['TARGET_LLVMGCC'], - 'CXX' : make_variables['TARGET_LLVMGXX'], - 'CFLAGS' : (make_variables['TARGET_FLAGS'] + ' ' + - make_variables['OPTFLAGS']), - 'CXXFLAGS' : (make_variables['TARGET_FLAGS'] + ' ' + - make_variables['OPTFLAGS']) } - - # Add the remote execution variables. - if config.remote: - test_module_variables['REMOTE_HOST'] = make_variables['REMOTE_HOST'] - test_module_variables['REMOTE_USER'] = make_variables['REMOTE_USER'] - test_module_variables['REMOTE_PORT'] = make_variables['REMOTE_PORT'] - test_module_variables['REMOTE_CLIENT'] = make_variables['REMOTE_CLIENT'] - - # Add miscellaneous optional variables. - if 'LD_ENV_OVERRIDES' in make_variables: - value = make_variables['LD_ENV_OVERRIDES'] - assert value.startswith('env ') - test_module_variables['LINK_ENVIRONMENT_OVERRIDES'] = value[4:] - - # This isn't possible currently, just here to mark what the option variable - # would be called. - if 'COMPILE_ENVIRONMENT_OVERRIDES' in make_variables: - test_module_variables['COMPILE_ENVIRONMENT_OVERRIDES'] = \ - make_variables['COMPILE_ENVIRONMENT_OVERRIDES'] - - if 'EXECUTION_ENVIRONMENT_OVERRIDES' in make_variables: - test_module_variables['EXECUTION_ENVIRONMENT_OVERRIDES'] = \ - make_variables['EXECUTION_ENVIRONMENT_OVERRIDES'] - - # We pass the test execution values as variables too, this might be better - # passed as actual arguments. - test_module_variables['THREADS'] = config.threads - test_module_variables['BUILD_THREADS'] = config.build_threads or \ - config.threads - return test_module_variables - -def execute_nt_tests(test_log, make_variables, basedir, config): - report_dir = config.report_dir - common_args = ['make', '-k'] - common_args.extend('%s=%s' % (k,v) for k,v in make_variables.items()) - if config.only_test is not None: - common_args.extend(['-C',config.only_test]) - - # If we are using isolation, run under sandbox-exec. - if config.use_isolation: - # Write out the sandbox profile. - sandbox_profile_path = os.path.join(basedir, "isolation.sb") - print >>sys.stderr, "%s: creating sandbox profile %r" % ( - timestamp(), sandbox_profile_path) - with open(sandbox_profile_path, 'w') as f: - print >>f, """ -;; Sandbox profile for isolation test access. -(version 1) - -;; Allow everything by default, and log debug messages on deny. -(allow default) -(debug deny) - -;; Deny all file writes by default. -(deny file-write*) - -;; Deny all network access by default. -(deny network*) - -;; Explicitly allow writes to temporary directories, /dev/, and the sandbox -;; output directory. 
-(allow file-write* (regex #"^/private/var/tmp/") - (regex #"^/private/tmp/") - (regex #"^/private/var/folders/") - (regex #"^/dev/") - (regex #"^%s"))""" % (basedir,) - common_args = ['sandbox-exec', '-f', sandbox_profile_path] + common_args - - # Run a separate 'make build' step if --build-threads was given. - if config.build_threads > 0: - args = common_args + ['-j', str(config.build_threads), 'build'] - print >>test_log, '%s: running: %s' % (timestamp(), - ' '.join('"%s"' % a - for a in args)) - test_log.flush() - - print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % ( - timestamp(), config.build_threads) - res = execute_command(test_log, basedir, args, report_dir) - if res != 0: - print >> sys.stderr, "Failure while running make build! See log: %s"%(test_log.name) - - # Then 'make report'. - args = common_args + ['-j', str(config.threads), - 'report', 'report.%s.csv' % config.test_style] - print >>test_log, '%s: running: %s' % (timestamp(), - ' '.join('"%s"' % a - for a in args)) - test_log.flush() - - # FIXME: We shouldn't need to set env=os.environ here, but if we don't - # somehow MACOSX_DEPLOYMENT_TARGET gets injected into the environment on OS - # X (which changes the driver behavior and causes generally weirdness). - print >>sys.stderr, '%s: executing "nightly tests" with -j%u...' % ( - timestamp(), config.threads) - - res = execute_command(test_log, basedir, args, report_dir) - - if res != 0: - print >> sys.stderr, "Failure while running nightly tests! See log: %s" % (test_log.name) - -# Keep a mapping of mangled test names, to the original names in the test-suite. -TEST_TO_NAME = {} -KNOWN_SAMPLE_KEYS = ('compile', 'exec', 'hash', - 'gcc.compile', 'bc.compile', 'llc.compile', - 'llc-beta.compile', 'jit.compile', 'gcc.exec', 'llc.exec', - 'llc-beta.exec', 'jit.exec') - - -def load_nt_report_file(report_path, config): - # Compute the test samples to report. - sample_keys = [] - - def append_to_sample_keys(tup): - stat = tup[1] - assert stat in KNOWN_SAMPLE_KEYS - if stat not in config.exclude_stat_from_submission: - sample_keys.append(tup) - if config.test_style == "simple": - test_namespace = 'nts' - time_stat = '' - # for now, user time is the unqualified Time stat - if config.test_time_stat == "real": - time_stat = 'Real_' - append_to_sample_keys((True, 'compile', 'CC_' + time_stat + 'Time', - None, 'CC', float)) - append_to_sample_keys((False, 'hash', 'CC_Hash', None, 'CC', str)) - append_to_sample_keys((True, 'exec', 'Exec_' + time_stat + 'Time', - None, 'Exec', float)) - else: - test_namespace = 'nightlytest' - append_to_sample_keys((True, 'gcc.compile', 'GCCAS', 'time')) - append_to_sample_keys((True, 'bc.compile', 'Bytecode', 'size')) - if config.test_llc: - append_to_sample_keys((True, 'llc.compile', 'LLC compile', 'time')) - if config.test_llcbeta: - append_to_sample_keys((True, 'llc-beta.compile', - 'LLC-BETA compile', 'time')) - if config.test_jit: - append_to_sample_keys((True, 'jit.compile', 'JIT codegen', 'time')) - append_to_sample_keys((True, 'gcc.exec', 'GCC', 'time')) - if config.test_llc: - append_to_sample_keys((True, 'llc.exec', 'LLC', 'time')) - if config.test_llcbeta: - append_to_sample_keys((True, 'llc-beta.exec', 'LLC-BETA', 'time')) - if config.test_jit: - append_to_sample_keys((True, 'jit.exec', 'JIT', 'time')) - - # Load the report file. - report_file = open(report_path, 'rb') - reader_it = iter(csv.reader(report_file)) - - # Get the header. 
- header = reader_it.next() - if header[0] != 'Program': - fatal('unexpected report file, missing header') - - # Verify we have the keys we expect. - if 'Program' not in header: - fatal('missing key %r in report header' % 'Program') - for item in sample_keys: - required = item[0] - header_name = item[2] - if required and header_name not in header: - fatal('missing key %r in report header' % header_name) - - # We don't use the test info, currently. - test_info = {} - test_samples = [] - for row in reader_it: - record = dict(zip(header, row)) - - program = record['Program'] - - if config.only_test is not None: - program = os.path.join(config.only_test, program) - if config.rerun_test is not None: - program = os.path.join(config.rerun_test, program) - - program_real = program - program_mangled = program.replace('.','_') - test_base_name = program_mangled - - # Check if this is a subtest result, in which case we ignore missing - # values. - if '_Subtest_' in test_base_name: - is_subtest = True - test_base_name = test_base_name.replace('_Subtest_', '.') - - else: - is_subtest = False - - test_base_name = '%s.%s' % (test_namespace, test_base_name) - - TEST_TO_NAME[test_base_name] = program_real - - for info in sample_keys: - if len(info) == 4: - required, name, key, tname = info - success_key = None - conv_f = float - else: - required, name, key, tname, success_key, conv_f = info - - test_name = '%s.%s' % (test_base_name, name) - if not required and key not in record: - continue - value = record[key] - if success_key is None: - success_value = value - else: - success_value = record[success_key] - - # FIXME: Move to simpler and more succinct format, using .failed. - if success_value == '*': - if is_subtest: - continue - status_value = lnt.testing.FAIL - elif success_value == 'xfail': - status_value = lnt.testing.XFAIL - else: - status_value = lnt.testing.PASS - - if test_namespace == 'nightlytest': - test_samples.append(lnt.testing.TestSamples( - test_name + '.success', - [status_value != lnt.testing.FAIL], test_info)) - else: - if status_value != lnt.testing.PASS: - test_samples.append(lnt.testing.TestSamples( - test_name + '.status', [status_value], test_info)) - if value != '*': - sample_test_name = test_name - if tname is not None: - sample_test_name += '.' + tname - test_samples.append(lnt.testing.TestSamples( - sample_test_name, [conv_f(value)], test_info, - conv_f=conv_f)) - - report_file.close() - - return test_samples - -def prepare_report_dir(config): - # Set up the sandbox. - sandbox_path = config.sandbox_path - print sandbox_path - if not os.path.exists(sandbox_path): - print >>sys.stderr, "%s: creating sandbox: %r" % ( - timestamp(), sandbox_path) - os.mkdir(sandbox_path) - - # Create the per-test directory. - report_dir = config.report_dir - if os.path.exists(report_dir): - needs_clean = True - else: - needs_clean = False - os.mkdir(report_dir) - - # Unless not using timestamps, we require the report dir not to exist. - if needs_clean and config.timestamp_build: - fatal('refusing to reuse pre-existing build dir %r' % report_dir) - -def prepare_build_dir(config, iteration) : - # report_dir is supposed to be canonicalized, so we do not need to - # call os.path.realpath before mkdir. - build_dir = config.build_dir(iteration) - if iteration is None: - return build_dir - - if os.path.exists(build_dir): - needs_clean = True - else: - needs_clean = False - os.mkdir(build_dir) - - # Unless not using timestamps, we require the basedir not to exist. 
- if needs_clean and config.timestamp_build: - fatal('refusing to reuse pre-existing build dir %r' % build_dir) - return build_dir - -def update_tools(make_variables, config, iteration): - """Update the test suite tools. """ - - print >>sys.stderr, '%s: building test-suite tools' % (timestamp(),) - args = ['make', 'tools'] - args.extend('%s=%s' % (k,v) for k,v in make_variables.items()) - build_tools_log_path = os.path.join(config.build_dir(iteration), - 'build-tools.log') - build_tools_log = open(build_tools_log_path, 'w') - print >>build_tools_log, '%s: running: %s' % (timestamp(), - ' '.join('"%s"' % a - for a in args)) - build_tools_log.flush() - res = execute_command(build_tools_log, config.build_dir(iteration), - args, config.report_dir) - build_tools_log.close() - if res != 0: - fatal('Unable to build tools, aborting! See log: %s'%(build_tools_log_path)) - -def configure_test_suite(config, iteration): - """Run configure on the test suite.""" - - basedir = config.build_dir(iteration) - configure_log_path = os.path.join(basedir, 'configure.log') - configure_log = open(configure_log_path, 'w') - - args = [os.path.realpath(os.path.join(config.test_suite_root, - 'configure'))] - if config.without_llvm: - args.extend(['--without-llvmsrc', '--without-llvmobj']) - else: - args.extend(['--with-llvmsrc=%s' % config.llvm_src_root, - '--with-llvmobj=%s' % config.llvm_obj_root]) - - if config.test_suite_externals: - args.append('--with-externals=%s' % - os.path.realpath(config.test_suite_externals)) - - print >>configure_log, '%s: running: %s' % (timestamp(), - ' '.join('"%s"' % a - for a in args)) - configure_log.flush() - - print >>sys.stderr, '%s: configuring...' % timestamp() - res = execute_command(configure_log, basedir, args, config.report_dir) - configure_log.close() - if res != 0: - fatal('Configure failed, log is here: %r' % configure_log_path) - -def copy_missing_makefiles(config, basedir): - """When running with only_test something, makefiles will be missing, - so copy them into place. """ - suffix = '' - for component in config.only_test.split('/'): - suffix = os.path.join(suffix, component) - obj_path = os.path.join(basedir, suffix) - src_path = os.path.join(config.test_suite_root, suffix) - if not os.path.exists(obj_path): - print '%s: initializing test dir %s' % (timestamp(), suffix) - os.mkdir(obj_path) - shutil.copyfile(os.path.join(src_path, 'Makefile'), - os.path.join(obj_path, 'Makefile')) - -def run_test(nick_prefix, iteration, config): - print >>sys.stderr, "%s: checking source versions" % ( - timestamp(),) - - test_suite_source_version = get_source_version(config.test_suite_root) - - # Compute the make variables. - make_variables, public_make_variables = config.compute_run_make_variables() - - # Compute the test module variables, which are a restricted subset of the - # make variables. - test_module_variables = compute_test_module_variables(make_variables, config) - - # Scan for LNT-based test modules. - print >>sys.stderr, "%s: scanning for LNT-based test modules" % ( - timestamp(),) - test_modules = list(scan_for_test_modules(config)) - print >>sys.stderr, "%s: found %d LNT-based test modules" % ( - timestamp(), len(test_modules)) - - nick = nick_prefix - if config.auto_name: - # Construct the nickname from a few key parameters. 
- cc_info = config.cc_info - cc_nick = '%s_%s' % (cc_info.get('cc_name'), cc_info.get('cc_build')) - nick += "__%s__%s" % (cc_nick, cc_info.get('cc_target').split('-')[0]) - print >>sys.stderr, "%s: using nickname: %r" % (timestamp(), nick) - - basedir = prepare_build_dir(config, iteration) - - # FIXME: Auto-remove old test directories in the source directory (which - # cause make horrible fits). - - start_time = timestamp() - print >>sys.stderr, '%s: starting test in %r' % (start_time, basedir) - - - # Configure the test suite. - if config.run_configure or not os.path.exists(os.path.join( - basedir, 'Makefile.config')): - configure_test_suite(config, iteration) - - # If running with --only-test, creating any dirs which might be missing and - # copy Makefiles. - if config.only_test is not None and not config.only_test.startswith("LNTBased"): - copy_missing_makefiles(config, basedir) - - # If running without LLVM, make sure tools are up to date. - if config.without_llvm: - update_tools(make_variables, config, iteration) - - # Always blow away any existing report. - build_report_path = config.build_report_path(iteration) - if os.path.exists(build_report_path): - os.remove(build_report_path) - - # Execute the tests. - test_log = open(config.test_log_path(iteration), 'w') - - # Run the make driven tests if needed. - run_nightly_test = (config.only_test is None or - not config.only_test.startswith("LNTBased")) - if run_nightly_test: - execute_nt_tests(test_log, make_variables, basedir, config) - - # Run the extension test modules, if needed. - test_module_results = execute_test_modules(test_log, test_modules, - test_module_variables, basedir, - config) - test_log.close() - - end_time = timestamp() - - # Load the nightly test samples. - if config.test_style == "simple": - test_namespace = 'nts' - else: - test_namespace = 'nightlytest' - if run_nightly_test: - print >>sys.stderr, '%s: loading nightly test data...' % timestamp() - # If nightly test went screwy, it won't have produced a report. - print build_report_path - if not os.path.exists(build_report_path): - fatal('nightly test failed, no report generated') - - test_samples = load_nt_report_file(build_report_path, config) - else: - test_samples = [] - - # Merge in the test samples from all of the test modules. - existing_tests = set(s.name for s in test_samples) - for module,results in test_module_results: - for s in results: - if s.name in existing_tests: - fatal("test module %r added duplicate test: %r" % ( - module, s.name)) - existing_tests.add(s.name) - test_samples.extend(results) - - print >>sys.stderr, '%s: capturing machine information' % (timestamp(),) - # Collect the machine and run info. - # - # FIXME: Import full range of data that the Clang tests are using? - machine_info = {} - machine_info['hardware'] = capture(["uname","-m"], - include_stderr=True).strip() - machine_info['os'] = capture(["uname","-sr"], include_stderr=True).strip() - if config.cc_reference is not None: - machine_info['gcc_version'] = capture( - [config.cc_reference, '--version'], - include_stderr=True).split('\n')[0] - - # FIXME: We aren't getting the LLCBETA options. - run_info = {} - run_info['tag'] = test_namespace - run_info.update(config.cc_info) - - # Capture sw_vers if this looks like Darwin. - if 'Darwin' in machine_info['os']: - run_info['sw_vers'] = capture(['sw_vers'], include_stderr=True).strip() - - # Query remote properties if in use. 
- if config.remote: - remote_args = [config.remote_client, - "-l", config.remote_user, - "-p", str(config.remote_port), - config.remote_host] - run_info['remote_uname'] = capture(remote_args + ["uname", "-a"], - include_stderr=True).strip() - - # Capture sw_vers if this looks like Darwin. - if 'Darwin' in run_info['remote_uname']: - run_info['remote_sw_vers'] = capture(remote_args + ["sw_vers"], - include_stderr=True).strip() - - # Query qemu user mode properties if in use. - if config.qemu_user_mode: - run_info['qemu_user_mode'] = config.qemu_user_mode_command - - # Add machine dependent info. - if config.use_machdep_info: - machdep_info = machine_info - else: - machdep_info = run_info - - machdep_info['uname'] = capture(["uname","-a"], include_stderr=True).strip() - machdep_info['name'] = capture(["uname","-n"], include_stderr=True).strip() - - # FIXME: Hack, use better method of getting versions. Ideally, from binaries - # so we are more likely to be accurate. - if config.llvm_source_version is not None: - run_info['llvm_revision'] = config.llvm_source_version - run_info['test_suite_revision'] = test_suite_source_version - run_info.update(public_make_variables) - - # Set the run order from the user, if given. - if config.run_order is not None: - run_info['run_order'] = config.run_order - - else: - # Otherwise, use the inferred run order from the compiler. - run_info['run_order'] = config.cc_info['inferred_run_order'] - - # Add any user specified parameters. - for target,params in ((machine_info, config.machine_parameters), - (run_info, config.run_parameters)): - for entry in params: - if '=' not in entry: - name,value = entry,'' - else: - name,value = entry.split('=', 1) - if name in target: - warning("user parameter %r overwrote existing value: %r" % ( - name, target.get(name))) - print target,name,value - target[name] = value - - # Generate the test report. - lnt_report_path = config.report_path(iteration) - print >>sys.stderr, '%s: generating report: %r' % (timestamp(), - lnt_report_path) - machine = lnt.testing.Machine(nick, machine_info) - run = lnt.testing.Run(start_time, end_time, info = run_info) - - report = lnt.testing.Report(machine, run, test_samples) - lnt_report_file = open(lnt_report_path, 'w') - print >>lnt_report_file,report.render() - lnt_report_file.close() - - return report - -### - -def _construct_report_path(basedir, only_test, test_style, file_type="csv"): - """Get the full path to report files in the sandbox. - """ - report_path = os.path.join(basedir) - if only_test is not None: - report_path = os.path.join(report_path, only_test) - report_path = os.path.join(report_path, ('report.%s.' % test_style) + file_type) - return report_path - - -def rerun_test(config, name, num_times): - """Take the test at name, and rerun it num_times with the previous settings - stored in config. - - """ - # Extend the old log file. - logfile = open(config.test_log_path(None), 'a') - - # Grab the real test name instead of the LNT benchmark URL. - real_name = TEST_TO_NAME["nts." + name] - - relative_test_path = os.path.dirname(real_name) - test_name = os.path.basename(real_name) - - test_full_path = os.path.join( - config.report_dir, relative_test_path) - - assert os.path.exists(test_full_path), "Previous test directory not there?" 
+ \
-        test_full_path
-
-    results = []
-    for _ in xrange(0, num_times):
-        test_results = _execute_test_again(config,
-                                           test_name,
-                                           test_full_path,
-                                           relative_test_path,
-                                           logfile)
-        results.extend(test_results)
-
-    # Check we got an exec and status from each run.
-    assert len(results) >= num_times, "Did not get all the runs?" + str(results)
-
-    logfile.close()
-    return results
-
-
-def _prepare_testsuite_for_rerun(test_name, test_full_path, config):
-    """Rerun step 1: wipe out old files to get ready for rerun.
-
-    """
-    output = os.path.join(test_full_path, "Output/")
-    test_path_prefix = output + test_name + "."
-    os.remove(test_path_prefix + "out-" + config.test_style)
-
-    # Remove all the test-suite accounting files for this benchmark
-    to_go = glob.glob(test_path_prefix + "*.time")
-    to_go.extend(glob.glob(test_path_prefix + "*.txt"))
-    to_go.extend(glob.glob(test_path_prefix + "*.csv"))
-
-    assert len(to_go) >= 1, "Missing at least one accounting file."
-    for path in to_go:
-        print "Removing:", path
-        os.remove(path)
-
-
-def _execute_test_again(config, test_name, test_path, test_relative_path, logfile):
-    """(Re)Execute the benchmark of interest. """
-
-    _prepare_testsuite_for_rerun(test_name, test_path, config)
-
-    # Grab old make invocation.
-    mk_vars, _ = config.compute_run_make_variables()
-    to_exec = ['make', '-k']
-    to_exec.extend('%s=%s' % (k, v) for k, v in mk_vars.items())
-
-    # We need to run the benchmark's makefile, not the global one.
-    if config.only_test is not None:
-        to_exec.extend(['-C', config.only_test])
-    else:
-        if test_relative_path:
-            to_exec.extend(['-C', test_relative_path])
-            config.rerun_test = test_relative_path
-    # The target for the specific benchmark.
-    # Make target.
-    benchmark_report_target = "Output/" + test_name + \
-        "." + config.test_style + ".report.txt"
-    # Actual file system location of the target.
-    benchmark_report_path = os.path.join(config.build_dir(None),
-                                         test_path,
-                                         benchmark_report_target)
-    to_exec.append(benchmark_report_target)
-
-    returncode = execute_command(logfile,
-                                 config.build_dir(None), to_exec, config.report_dir)
-    assert returncode == 0, "Remake command failed."
-    assert os.path.exists(benchmark_report_path), "Missing " \
-        "generated report: " + benchmark_report_path
-
-    # Now we need to pull out the results into the CSV format LNT can read.
-    schema = os.path.join(config.test_suite_root,
-                          "TEST." + config.test_style + ".report")
-    result_path = os.path.join(config.build_dir(None),
-                               test_path, "Output",
-                               test_name + "." + config.test_style + ".report.csv")
-
-    gen_report_template = "{gen} -csv {schema} < {input} > {output}"
-    gen_cmd = gen_report_template.format(gen=config.generate_report_script,
-                                         schema=schema, input=benchmark_report_path, output=result_path)
-    bash_gen_cmd = ["/bin/bash", "-c", gen_cmd]
-
-    assert not os.path.exists(result_path), "Results should not exist yet." + \
-        result_path
-    returncode = execute_command(logfile,
-                                 config.build_dir(None), bash_gen_cmd, config.report_dir)
-    assert returncode == 0, "command failed"
-    assert os.path.exists(result_path), "Missing results file."
-
-    results = load_nt_report_file(result_path, config)
-    assert len(results) > 0
-    return results
-
-def _unix_quote_args(s):
-    return map(pipes.quote, shlex.split(s))
+from lnt.tests.nt_buildsystem.makefile import NT_Makefile

 # When set to true, all benchmarks will be rerun.
 # TODO: remove me when rerun patch is done.
@@ -1250,6 +57,10 @@
 SERVER_MEM_RESULT = "mem"
 SERVER_HASH_RESULT = "hash"

+KNOWN_SAMPLE_KEYS = ('compile', 'exec', 'hash',
+                     'gcc.compile', 'bc.compile', 'llc.compile',
+                     'llc-beta.compile', 'jit.compile', 'gcc.exec', 'llc.exec',
+                     'llc-beta.exec', 'jit.exec')

 class PastRunData(object):
     """To decide if we need to rerun, we must know
@@ -1304,8 +115,7 @@
                 repr(self.execution_status),
                 repr(self.execution_time))

-
-def _process_reruns(config, server_reply, local_results):
+def _process_reruns(runner, server_reply, local_results):
     """Rerun each benchmark which the server reported "changed", N more
     times.

@@ -1410,9 +220,7 @@
                      i + 1,
                      len(rerunable_benches)))

-        fresh_samples = rerun_test(config,
-                                   bench.name,
-                                   NUMBER_OF_RERUNS)
+        fresh_samples = runner.rerun(bench.name, NUMBER_OF_RERUNS)
         rerun_results.extend(fresh_samples)

     return rerun_results
@@ -1868,16 +676,9 @@
             warning('expected --isysroot when executing with '
                     '--ios-simulator-sdk')

-        config = TestConfiguration(vars(opts), timestamp())
-        # FIXME: We need to validate that there is no configured output in the
-        # test-suite directory, that borks things.
-        prepare_report_dir(config)
-        # These notes are used by the regression tests to check if we've handled
-        # flags correctly.
-        note('TARGET_FLAGS: {}'.format(' '.join(config.target_flags)))
-        if config.qemu_user_mode:
-            note('QEMU_USER_MODE_COMMAND: {}'.format(config.qemu_user_mode_command))
+        # FIXME: Add extension objects here too.
+        runner = NT_Makefile(timestamp(), vars(opts))

         # Multisample, if requested.
         if opts.multisample is not None:
@@ -1887,7 +688,7 @@
             for i in range(opts.multisample):
                 print >>sys.stderr, "%s: (multisample) running iteration %d" % (
                     timestamp(), i)
-                report = run_test(nick, i, config)
+                report = runner.run(nick, i)
                 reports.append(report)

             # Create the merged report.
@@ -1902,51 +703,55 @@
                                 for r in reports], [])

             # Write out the merged report.
-            lnt_report_path = config.report_path(None)
+            lnt_report_path = runner.report_path()
             report = lnt.testing.Report(machine, run, test_samples)
             lnt_report_file = open(lnt_report_path, 'w')
             print >>lnt_report_file, report.render()
             lnt_report_file.close()

         else:
-            test_results = run_test(nick, None, config)
+            test_results = runner.run(nick)
+
         if opts.rerun:
             self.log("Performing any needed reruns.")
-            server_report = self.submit_helper(config, commit=False)
-            new_samples = _process_reruns(config, server_report, test_results)
+            server_report = self.submit_helper(runner.report_path(),
+                                               opts.submit_url,
+                                               commit=False)
+            new_samples = _process_reruns(runner, server_report, test_results)
             test_results.update_report(new_samples)

             # persist report with new samples.
-            lnt_report_path = config.report_path(None)
+            lnt_report_path = runner.report_path()
             lnt_report_file = open(lnt_report_path, 'w')
             print >>lnt_report_file, test_results.render()
             lnt_report_file.close()

-        if config.output is not None:
-            self.print_report(test_results, config.output)
+        if opts.output is not None:
+            self.print_report(test_results, opts.output)

         commit = True

-        server_report = self.submit_helper(config, commit)
+        server_report = self.submit_helper(runner.report_path(),
+                                           opts.submit_url,
+                                           commit)

         ImportData.print_report_result(server_report, sys.stdout, sys.stderr,
-                                       config.verbose)
+                                       opts.verbose)

         return server_report

-    def submit_helper(self, config, commit=False):
+    def submit_helper(self, report_path, submit_url, commit=False):
         """Submit the report to the server. If no server
         was specified, use a local mock server.
""" - report_path = config.report_path(None) assert os.path.exists(report_path), "Passed an invalid report file. " \ "Should have never gotten here!" result = None - if config.submit_url: + if submit_url: from lnt.util import ServerUtil - for server in config.submit_url: + for server in submit_url: self.log("submitting result to %r" % (server,)) try: result = ServerUtil.submitFile(server, report_path, Index: lnt/tests/nt_buildsystem/__init__.py =================================================================== --- /dev/null +++ lnt/tests/nt_buildsystem/__init__.py @@ -0,0 +1,16 @@ + + +# NT_Buildsystem - small abstraction layer over Makefile-based or CMake-based +# drivers for test-suite. +class NT_Buildsystem(object): + def __init__(self, timestamp, opts): + raise RuntimeError("Subclass me!") + + def run(self, nick, multisample_iteration=None, compile=True, test=True): + raise RuntimeError("Subclass me!") + + def rerun(self, name, num_reruns): + raise RuntimeError("Subclass me!") + + def report_path(self): + raise RuntimeError("Subclass me!") Index: lnt/tests/nt_buildsystem/makefile.py =================================================================== --- /dev/null +++ lnt/tests/nt_buildsystem/makefile.py @@ -0,0 +1,1247 @@ +import csv +import os +import platform +import re +import shutil +import subprocess +import sys +import glob +import time +import traceback +from datetime import datetime +from optparse import OptionParser, OptionGroup +import urllib2 +import shlex +import pipes + +import lnt.testing +import lnt.testing.util.compilers +import lnt.util.ImportData as ImportData + +from lnt.testing.util.commands import note, warning, fatal +from lnt.testing.util.commands import capture, mkdir_p, which +from lnt.testing.util.commands import resolve_command_path + +from lnt.testing.util.rcs import get_source_version + +from lnt.testing.util.misc import timestamp + +from lnt.server.reporting.analysis import UNCHANGED_PASS, UNCHANGED_FAIL +from lnt.server.reporting.analysis import REGRESSED, IMPROVED +from lnt.util import ImportData + +import lnt.tests.nt +from lnt.tests.nt_buildsystem import NT_Buildsystem + +class TestConfiguration(object): + """Store and calculate important paths and options for this test based + on the command line arguments. This object is stateless and only + based on the command line arguments! Options which take a long + time to calculate are cached, since we are stateless this is okay. + + """ + + def __init__(self, opts, start_time): + """Prepare the configuration: + opts -- the command line options object + start_time -- the time the program was invoked as a string + """ + assert type(opts) == dict, "Options must be a dict." + self.opts = opts + self.__dict__.update(opts) + self.start_time = start_time + + # Report directory cache. + self._report_dir = None + # Compiler interrogation is a lot of work, this will cache it. + self._cc_info = None + # Getting compiler version spawns subprocesses, cache it. + self._get_source_version = None + self.rerun_test = None + + @property + def report_dir(self): + """Get the (possibly cached) path to the directory where test suite + will be placed. 
Report dir is a directory within the sandbox which + is either "build" or a timestamped directory based on """ + if self._report_dir is not None: + return self._report_dir + + if self.timestamp_build: + ts = self.start_time.replace(' ', '_').replace(':', '-') + build_dir_name = "test-%s" % ts + else: + build_dir_name = "build" + basedir = os.path.join(self.sandbox_path, build_dir_name) + # Canonicalize paths, in case we are using e.g. an NFS remote mount. + # + # FIXME: This should be eliminated, along with the realpath call below. + basedir = os.path.realpath(basedir) + self._report_dir = basedir + return basedir + + def report_path(self, iteration): + """Path to a single run's JSON results file.""" + return os.path.join(self.build_dir(iteration), 'report.json') + + def build_dir(self, iteration): + """Path of the build dir within the report dir. iteration -- the + iteration number if multisample otherwise None. + When multisample is off report_dir == build_dir. + """ + # Do nothing in single-sample build, because report_dir and the + # build_dir is the same directory. + if iteration is None: + return self.report_dir + + # Create the directory for individual iteration. + return os.path.join(self.report_dir, "sample-%d" % iteration) + + @property + def target_flags(self): + """Computed target flags list.""" + # Compute TARGET_FLAGS. + target_flags = [] + + # FIXME: Eliminate this blanket option. + target_flags.extend(self.cflags) + + if self.cflag_string: + # FIXME: This isn't generally OK on Windows :/ + target_flags.extend(_unix_quote_args(self.cflag_string)) + + # Pass flags to backend. + for f in self.mllvm: + target_flags.extend(['-mllvm', f]) + + if self.arch is not None: + target_flags.append('-arch') + target_flags.append(self.arch) + if self.isysroot is not None: + target_flags.append('-isysroot') + target_flags.append(self.isysroot) + return target_flags + + @property + def cc_info(self): + """Discovered compiler information from the cc under test. Cached + because discovery is slow. + + """ + if self._cc_info is None: + self._cc_info = lnt.testing.util.compilers.get_cc_info( + self.cc_under_test, + self.target_flags) + return self._cc_info + + @property + def target(self): + """Discovered compiler's target information.""" + # Get compiler info. + cc_target = self.cc_info.get('cc_target') + return cc_target + + @property + def llvm_source_version(self): + """The version of llvm from llvm_src_root.""" + if self.llvm_src_root: + if self._get_source_version is None: + self._get_source_version = get_source_version( + self.llvm_src_root) + return self._get_source_version + else: + return None + + @property + def qemu_user_mode_command(self): + """ The command used for qemu user mode """ + assert self.qemu_user_mode + qemu_cmd_line = [self.qemu_user_mode] + self.qemu_flags + if self.qemu_string: + qemu_cmd_line += _unix_quote_args(self.qemu_string) + return ' '.join(qemu_cmd_line) + + @property + def generate_report_script(self): + """ The path to the report generation script. """ + return os.path.join(self.test_suite_root, "GenerateReport.pl") + + def build_report_path(self, iteration): + """The path of the results.csv file which each run of the test suite + will produce. 
+ iteration -- the multisample iteration number otherwise None.""" + report_path = os.path.join(self.build_dir(iteration)) + if self.only_test is not None: + report_path = os.path.join(report_path, self.only_test) + report_path = os.path.join(report_path, 'report.%s.csv' % + self.test_style) + return report_path + + def test_log_path(self, iteration): + """The path of the log file for the build. + iteration -- the multisample iteration number otherwise None.""" + return os.path.join(self.build_dir(iteration), 'test.log') + + def compute_run_make_variables(self, profiling=False): + """Compute make variables from command line arguments and compiler. + Returns a dict of make_variables as well as a public version + with the remote options removed. + + """ + cc_info = self.cc_info + # Set the make variables to use. + make_variables = { + 'TARGET_CC': self.cc_reference, + 'TARGET_CXX': self.cxx_reference, + 'TARGET_LLVMGCC': self.cc_under_test, + 'TARGET_LLVMGXX': self.cxx_under_test, + 'TARGET_FLAGS': ' '.join(self.target_flags), + } + + # Compute TARGET_LLCFLAGS, for TEST=nightly runs. + if self.test_style == "nightly": + # Compute TARGET_LLCFLAGS. + target_llcflags = [] + if self.mcpu is not None: + target_llcflags.append('-mcpu') + target_llcflags.append(self.mcpu) + if self.relocation_model is not None: + target_llcflags.append('-relocation-model') + target_llcflags.append(self.relocation_model) + if self.disable_fp_elim: + target_llcflags.append('-disable-fp-elim') + make_variables['TARGET_LLCFLAGS'] = ' '.join(target_llcflags) + + # Set up environment overrides if requested, to effectively + # run under the specified Darwin iOS simulator. + # + # See /D/P/../Developer/Tools/RunPlatformUnitTests. + if self.ios_simulator_sdk is not None: + make_variables['EXECUTION_ENVIRONMENT_OVERRIDES'] = ' '.join( + ['DYLD_FRAMEWORK_PATH="%s"' % self.ios_simulator_sdk, + 'DYLD_LIBRARY_PATH=""', + 'DYLD_ROOT_PATH="%s"' % self.ios_simulator_sdk, + 'DYLD_NEW_LOCAL_SHARED_REGIONS=YES', + 'DYLD_NO_FIX_PREBINDING=YES', + 'IPHONE_SIMULATOR_ROOT="%s"' % self.ios_simulator_sdk, + 'CFFIXED_USER_HOME="%s"' % os.path.expanduser( + "~/Library/Application Support/iPhone Simulator/User")]) + + # Pick apart the build mode. + build_mode = self.build_mode + if build_mode.startswith("Debug"): + build_mode = build_mode[len("Debug"):] + make_variables['ENABLE_OPTIMIZED'] = '0' + elif build_mode.startswith("Unoptimized"): + build_mode = build_mode[len("Unoptimized"):] + make_variables['ENABLE_OPTIMIZED'] = '0' + elif build_mode.startswith("Release"): + build_mode = build_mode[len("Release"):] + make_variables['ENABLE_OPTIMIZED'] = '1' + else: + fatal('invalid build mode: %r' % self.build_mode) + + while build_mode: + for (name, key) in (('+Asserts', 'ENABLE_ASSERTIONS'), + ('+Checks', 'ENABLE_EXPENSIVE_CHECKS'), + ('+Coverage', 'ENABLE_COVERAGE'), + ('+Debug', 'DEBUG_SYMBOLS'), + ('+Profile', 'ENABLE_PROFILING')): + if build_mode.startswith(name): + build_mode = build_mode[len(name):] + make_variables[key] = '1' + break + else: + fatal('invalid build mode: %r' % self.build_mode) + + # Assertions are disabled by default. + if 'ENABLE_ASSERTIONS' in make_variables: + del make_variables['ENABLE_ASSERTIONS'] + else: + make_variables['DISABLE_ASSERTIONS'] = '1' + + # Set the optimization level options. 
+ make_variables['OPTFLAGS'] = self.optimize_option + if self.optimize_option == '-Os': + make_variables['LLI_OPTFLAGS'] = '-O2' + make_variables['LLC_OPTFLAGS'] = '-O2' + else: + make_variables['LLI_OPTFLAGS'] = self.optimize_option + make_variables['LLC_OPTFLAGS'] = self.optimize_option + + # Set test selection variables. + if not self.test_cxx: + make_variables['DISABLE_CXX'] = '1' + if not self.test_jit: + make_variables['DISABLE_JIT'] = '1' + if not self.test_llc: + make_variables['DISABLE_LLC'] = '1' + if not self.test_lto: + make_variables['DISABLE_LTO'] = '1' + if self.test_llcbeta: + make_variables['ENABLE_LLCBETA'] = '1' + if self.test_small: + make_variables['SMALL_PROBLEM_SIZE'] = '1' + if self.test_large: + if self.test_small: + fatal('the --small and --large options are mutually exclusive') + make_variables['LARGE_PROBLEM_SIZE'] = '1' + if self.test_benchmarking_only: + make_variables['BENCHMARKING_ONLY'] = '1' + if self.test_integrated_as: + make_variables['TEST_INTEGRATED_AS'] = '1' + if self.liblto_path: + make_variables['LD_ENV_OVERRIDES'] = ( + 'env DYLD_LIBRARY_PATH=%s' % os.path.dirname( + self.liblto_path)) + + if self.threads > 1 or self.build_threads > 1: + make_variables['ENABLE_PARALLEL_REPORT'] = '1' + + # Select the test style to use. + if self.test_style == "simple": + # We always use reference outputs with TEST=simple. + make_variables['ENABLE_HASHED_PROGRAM_OUTPUT'] = '1' + make_variables['USE_REFERENCE_OUTPUT'] = '1' + make_variables['TEST'] = self.test_style + + # Set CC_UNDER_TEST_IS_CLANG when appropriate. + if cc_info.get('cc_name') in ('apple_clang', 'clang'): + make_variables['CC_UNDER_TEST_IS_CLANG'] = '1' + elif cc_info.get('cc_name') in ('llvm-gcc',): + make_variables['CC_UNDER_TEST_IS_LLVM_GCC'] = '1' + elif cc_info.get('cc_name') in ('gcc',): + make_variables['CC_UNDER_TEST_IS_GCC'] = '1' + + # Convert the target arch into a make variable, to allow more + # target based specialization (e.g., + # CC_UNDER_TEST_TARGET_IS_ARMV7). + if '-' in cc_info.get('cc_target', ''): + arch_name = cc_info.get('cc_target').split('-', 1)[0] + make_variables['CC_UNDER_TEST_TARGET_IS_' + arch_name.upper()] = '1' + + # Set LLVM_RELEASE_IS_PLUS_ASSERTS when appropriate, to allow + # testing older LLVM source trees. + llvm_source_version = self.llvm_source_version + if (llvm_source_version and llvm_source_version.isdigit() and + int(llvm_source_version) < 107758): + make_variables['LLVM_RELEASE_IS_PLUS_ASSERTS'] = 1 + + # Set ARCH appropriately, based on the inferred target. + # + # FIXME: We should probably be more strict about this. + cc_target = cc_info.get('cc_target') + llvm_arch = self.llvm_arch + if cc_target and llvm_arch is None: + # cc_target is expected to be a (GCC style) target + # triple. Pick out the arch component, and then try to + # convert it to an LLVM nightly test style architecture + # name, which is of course totally different from all of + # GCC names, triple names, LLVM target names, and LLVM + # triple names. Stupid world. + # + # FIXME: Clean this up once everyone is on 'lnt runtest + # nt' style nightly testing. 
+ arch = cc_target.split('-', 1)[0].lower() + if (len(arch) == 4 and arch[0] == 'i' and arch.endswith('86') and + arch[1] in '3456789'): # i[3-9]86 + llvm_arch = 'x86' + elif arch in ('x86_64', 'amd64'): + llvm_arch = 'x86_64' + elif arch in ('powerpc', 'powerpc64', 'ppu'): + llvm_arch = 'PowerPC' + elif (arch == 'arm' or arch.startswith('armv') or + arch == 'thumb' or arch.startswith('thumbv') or + arch == 'xscale'): + llvm_arch = 'ARM' + elif arch in ('aarch64', 'arm64'): + llvm_arch = 'AArch64' + elif arch.startswith('alpha'): + llvm_arch = 'Alpha' + elif arch.startswith('sparc'): + llvm_arch = 'Sparc' + elif arch in ('mips', 'mipsel', 'mips64', 'mips64el'): + llvm_arch = 'Mips' + + if llvm_arch is not None: + make_variables['ARCH'] = llvm_arch + else: + warning("unable to infer ARCH, some tests may not run correctly!") + + # Add in any additional make flags passed in via --make-param. + for entry in self.make_parameters: + if '=' not in entry: + name, value = entry, '' + else: + name, value = entry.split('=', 1) + + make_variables[name] = value + + # Set remote execution variables, if used. + if self.remote: + # make a copy of args for report, without remote options. + public_vars = make_variables.copy() + make_variables['REMOTE_HOST'] = self.remote_host + make_variables['REMOTE_USER'] = self.remote_user + make_variables['REMOTE_PORT'] = str(self.remote_port) + make_variables['REMOTE_CLIENT'] = self.remote_client + else: + public_vars = make_variables + + # Set qemu user mode variables, if used. + if self.qemu_user_mode: + make_variables['USER_MODE_EMULATION'] = '1' + make_variables['RUNUNDER'] = self.qemu_user_mode_command + + # Set USE_PERF flag, if specified. + if self.use_perf: + make_variables['USE_PERF'] = '1' + + return make_variables, public_vars + +def execute_command(test_log, basedir, args, report_dir): + logfile = test_log + + if report_dir is not None: + logfile = subprocess.PIPE + # Open a duplicated logfile at the global dir. + _, logname = os.path.split(test_log.name) + global_log_path = os.path.join(report_dir, logname) + global_log = open(global_log_path, 'a+') + + p = subprocess.Popen(args=args, stdin=None, stdout=logfile, + stderr=subprocess.STDOUT, cwd=basedir, + env=os.environ) + + if report_dir is not None: + while p.poll() is None: + l = p.stdout.readline() + if len(l) > 0: + test_log.write(l) + global_log.write(l) + + global_log.close() + + return p.wait() + +def execute_nt_tests(test_log, make_variables, basedir, config): + report_dir = config.report_dir + common_args = ['make', '-k'] + common_args.extend('%s=%s' % (k,v) for k,v in make_variables.items()) + if config.only_test is not None: + common_args.extend(['-C',config.only_test]) + + # If we are using isolation, run under sandbox-exec. + if config.use_isolation: + # Write out the sandbox profile. + sandbox_profile_path = os.path.join(basedir, "isolation.sb") + print >>sys.stderr, "%s: creating sandbox profile %r" % ( + timestamp(), sandbox_profile_path) + with open(sandbox_profile_path, 'w') as f: + print >>f, """ +;; Sandbox profile for isolation test access. +(version 1) + +;; Allow everything by default, and log debug messages on deny. +(allow default) +(debug deny) + +;; Deny all file writes by default. +(deny file-write*) + +;; Deny all network access by default. +(deny network*) + +;; Explicitly allow writes to temporary directories, /dev/, and the sandbox +;; output directory. 
+(allow file-write* (regex #"^/private/var/tmp/")
+                   (regex #"^/private/tmp/")
+                   (regex #"^/private/var/folders/")
+                   (regex #"^/dev/")
+                   (regex #"^%s"))""" % (basedir,)
+        common_args = ['sandbox-exec', '-f', sandbox_profile_path] + common_args
+
+    # Run a separate 'make build' step if --build-threads was given.
+    if config.build_threads > 0:
+        args = common_args + ['-j', str(config.build_threads), 'build']
+        print >>test_log, '%s: running: %s' % (timestamp(),
+                                               ' '.join('"%s"' % a
+                                                        for a in args))
+        test_log.flush()
+
+        print >>sys.stderr, '%s: building "nightly tests" with -j%u...' % (
+            timestamp(), config.build_threads)
+        res = execute_command(test_log, basedir, args, report_dir)
+        if res != 0:
+            print >>sys.stderr, "Failure while running make build! See log: %s" % (test_log.name)
+
+    # Then 'make report'.
+    args = common_args + ['-j', str(config.threads),
+                          'report', 'report.%s.csv' % config.test_style]
+    print >>test_log, '%s: running: %s' % (timestamp(),
+                                           ' '.join('"%s"' % a
+                                                    for a in args))
+    test_log.flush()
+
+    # FIXME: We shouldn't need to set env=os.environ here, but if we don't,
+    # somehow MACOSX_DEPLOYMENT_TARGET gets injected into the environment on
+    # OS X (which changes the driver behavior and causes general weirdness).
+    print >>sys.stderr, '%s: executing "nightly tests" with -j%u...' % (
+        timestamp(), config.threads)
+
+    res = execute_command(test_log, basedir, args, report_dir)
+
+    if res != 0:
+        print >>sys.stderr, "Failure while running nightly tests! See log: %s" % (test_log.name)
+
+
+# Keep a mapping of mangled test names to the original names in the test-suite.
+TEST_TO_NAME = {}
+
+def load_nt_report_file(report_path, config):
+    # Compute the test samples to report.
+    sample_keys = []
+
+    def append_to_sample_keys(tup):
+        stat = tup[1]
+        assert stat in lnt.tests.nt.KNOWN_SAMPLE_KEYS
+        if stat not in config.exclude_stat_from_submission:
+            sample_keys.append(tup)
+
+    if config.test_style == "simple":
+        test_namespace = 'nts'
+        time_stat = ''
+        # For now, user time is the unqualified Time stat.
+        if config.test_time_stat == "real":
+            time_stat = 'Real_'
+        append_to_sample_keys((True, 'compile', 'CC_' + time_stat + 'Time',
+                               None, 'CC', float))
+        append_to_sample_keys((False, 'hash', 'CC_Hash', None, 'CC', str))
+        append_to_sample_keys((True, 'exec', 'Exec_' + time_stat + 'Time',
+                               None, 'Exec', float))
+    else:
+        test_namespace = 'nightlytest'
+        append_to_sample_keys((True, 'gcc.compile', 'GCCAS', 'time'))
+        append_to_sample_keys((True, 'bc.compile', 'Bytecode', 'size'))
+        if config.test_llc:
+            append_to_sample_keys((True, 'llc.compile', 'LLC compile', 'time'))
+        if config.test_llcbeta:
+            append_to_sample_keys((True, 'llc-beta.compile',
+                                   'LLC-BETA compile', 'time'))
+        if config.test_jit:
+            append_to_sample_keys((True, 'jit.compile', 'JIT codegen', 'time'))
+        append_to_sample_keys((True, 'gcc.exec', 'GCC', 'time'))
+        if config.test_llc:
+            append_to_sample_keys((True, 'llc.exec', 'LLC', 'time'))
+        if config.test_llcbeta:
+            append_to_sample_keys((True, 'llc-beta.exec', 'LLC-BETA', 'time'))
+        if config.test_jit:
+            append_to_sample_keys((True, 'jit.exec', 'JIT', 'time'))
+
+    # Load the report file.
+    report_file = open(report_path, 'rb')
+    reader_it = iter(csv.reader(report_file))
+
+    # Get the header.
+    header = reader_it.next()
+    if header[0] != 'Program':
+        fatal('unexpected report file, missing header')
+
+    # Verify we have the keys we expect.
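+    # Each sample_keys entry is either a 4-tuple (required, metric name, CSV
+    # column, type name) or a 6-tuple that additionally carries a status
+    # column and a conversion function; see the unpacking loop below.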
+    if 'Program' not in header:
+        fatal('missing key %r in report header' % 'Program')
+    for item in sample_keys:
+        required = item[0]
+        header_name = item[2]
+        if required and header_name not in header:
+            fatal('missing key %r in report header' % header_name)
+
+    # We don't use the test info, currently.
+    test_info = {}
+    test_samples = []
+    for row in reader_it:
+        record = dict(zip(header, row))
+
+        program = record['Program']
+
+        if config.only_test is not None:
+            program = os.path.join(config.only_test, program)
+        if config.rerun_test is not None:
+            program = os.path.join(config.rerun_test, program)
+
+        program_real = program
+        program_mangled = program.replace('.', '_')
+        test_base_name = program_mangled
+
+        # Check if this is a subtest result, in which case we ignore missing
+        # values.
+        if '_Subtest_' in test_base_name:
+            is_subtest = True
+            test_base_name = test_base_name.replace('_Subtest_', '.')
+        else:
+            is_subtest = False
+
+        test_base_name = '%s.%s' % (test_namespace, test_base_name)
+
+        TEST_TO_NAME[test_base_name] = program_real
+
+        for info in sample_keys:
+            if len(info) == 4:
+                required, name, key, tname = info
+                success_key = None
+                conv_f = float
+            else:
+                required, name, key, tname, success_key, conv_f = info
+
+            test_name = '%s.%s' % (test_base_name, name)
+            if not required and key not in record:
+                continue
+            value = record[key]
+            if success_key is None:
+                success_value = value
+            else:
+                success_value = record[success_key]
+
+            # FIXME: Move to simpler and more succinct format, using .failed.
+            if success_value == '*':
+                if is_subtest:
+                    continue
+                status_value = lnt.testing.FAIL
+            elif success_value == 'xfail':
+                status_value = lnt.testing.XFAIL
+            else:
+                status_value = lnt.testing.PASS
+
+            if test_namespace == 'nightlytest':
+                test_samples.append(lnt.testing.TestSamples(
+                    test_name + '.success',
+                    [status_value != lnt.testing.FAIL], test_info))
+            else:
+                if status_value != lnt.testing.PASS:
+                    test_samples.append(lnt.testing.TestSamples(
+                        test_name + '.status', [status_value], test_info))
+                if value != '*':
+                    sample_test_name = test_name
+                    if tname is not None:
+                        sample_test_name += '.' + tname
+                    test_samples.append(lnt.testing.TestSamples(
+                        sample_test_name, [conv_f(value)], test_info,
+                        conv_f=conv_f))
+
+    report_file.close()
+
+    return test_samples
+
+def prepare_report_dir(config):
+    # Set up the sandbox.
+    sandbox_path = config.sandbox_path
+    if not os.path.exists(sandbox_path):
+        print >>sys.stderr, "%s: creating sandbox: %r" % (
+            timestamp(), sandbox_path)
+        os.mkdir(sandbox_path)
+
+    # Create the per-test directory.
+    report_dir = config.report_dir
+    if os.path.exists(report_dir):
+        needs_clean = True
+    else:
+        needs_clean = False
+        os.mkdir(report_dir)
+
+    # When using a timestamped build directory, we require the report dir not
+    # to already exist.
+    if needs_clean and config.timestamp_build:
+        fatal('refusing to reuse pre-existing build dir %r' % report_dir)
+
+def prepare_build_dir(config, iteration):
+    # report_dir is supposed to be canonicalized, so we do not need to
+    # call os.path.realpath before mkdir.
+    build_dir = config.build_dir(iteration)
+    if iteration is None:
+        return build_dir
+
+    if os.path.exists(build_dir):
+        needs_clean = True
+    else:
+        needs_clean = False
+        os.mkdir(build_dir)
+
+    # When using a timestamped build directory, we require the per-iteration
+    # basedir not to already exist.
+ if needs_clean and config.timestamp_build: + fatal('refusing to reuse pre-existing build dir %r' % build_dir) + return build_dir + +def update_tools(make_variables, config, iteration): + """Update the test suite tools. """ + + print >>sys.stderr, '%s: building test-suite tools' % (timestamp(),) + args = ['make', 'tools'] + args.extend('%s=%s' % (k,v) for k,v in make_variables.items()) + build_tools_log_path = os.path.join(config.build_dir(iteration), + 'build-tools.log') + build_tools_log = open(build_tools_log_path, 'w') + print >>build_tools_log, '%s: running: %s' % (timestamp(), + ' '.join('"%s"' % a + for a in args)) + build_tools_log.flush() + res = execute_command(build_tools_log, config.build_dir(iteration), + args, config.report_dir) + build_tools_log.close() + if res != 0: + fatal('Unable to build tools, aborting! See log: %s'%(build_tools_log_path)) + +def configure_test_suite(config, iteration): + """Run configure on the test suite.""" + + basedir = config.build_dir(iteration) + configure_log_path = os.path.join(basedir, 'configure.log') + configure_log = open(configure_log_path, 'w') + + args = [os.path.realpath(os.path.join(config.test_suite_root, + 'configure'))] + if config.without_llvm: + args.extend(['--without-llvmsrc', '--without-llvmobj']) + else: + args.extend(['--with-llvmsrc=%s' % config.llvm_src_root, + '--with-llvmobj=%s' % config.llvm_obj_root]) + + if config.test_suite_externals: + args.append('--with-externals=%s' % + os.path.realpath(config.test_suite_externals)) + + print >>configure_log, '%s: running: %s' % (timestamp(), + ' '.join('"%s"' % a + for a in args)) + configure_log.flush() + + print >>sys.stderr, '%s: configuring...' % timestamp() + res = execute_command(configure_log, basedir, args, config.report_dir) + configure_log.close() + if res != 0: + fatal('Configure failed, log is here: %r' % configure_log_path) + +def copy_missing_makefiles(config, basedir): + """When running with only_test something, makefiles will be missing, + so copy them into place. """ + suffix = '' + for component in config.only_test.split('/'): + suffix = os.path.join(suffix, component) + obj_path = os.path.join(basedir, suffix) + src_path = os.path.join(config.test_suite_root, suffix) + if not os.path.exists(obj_path): + print '%s: initializing test dir %s' % (timestamp(), suffix) + os.mkdir(obj_path) + shutil.copyfile(os.path.join(src_path, 'Makefile'), + os.path.join(obj_path, 'Makefile')) + +class TestModule(object): + """ + Base class for extension test modules. 
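+
+    Subclasses are discovered by scan_for_test_modules: each 'TestModule' file
+    under the test-suite's LNTBased directory is exec'd and must define a
+    module-level 'test_class' global naming a TestModule subclass. The
+    subclass implements execute_test(options) and returns a list of
+    lnt.testing.TestSamples.
+
+    A minimal module might look like this (an illustrative sketch; the names
+    are examples only):
+
+        import lnt.testing
+        from lnt.tests.nt import TestModule
+
+        class MyTest(TestModule):
+            def execute_test(self, options):
+                self.log.write('running my test')
+                return [lnt.testing.TestSamples('nts.mytest.exec', [1.0], {})]
+
+        test_class = MyTest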
+    """
+
+    def __init__(self):
+        self._log = None
+
+    def main(self):
+        raise NotImplementedError
+
+    def execute_test(self, options):
+        raise RuntimeError("Abstract Method.")
+
+    def _execute_test(self, test_log, options):
+        self._log = test_log
+        try:
+            return self.execute_test(options)
+        finally:
+            self._log = None
+
+    @property
+    def log(self):
+        """Get the test log output stream."""
+        if self._log is None:
+            raise ValueError("log() unavailable outside test execution")
+        return self._log
+
+###
+
+def scan_for_test_modules(config):
+    base_modules_path = os.path.join(config.test_suite_root, 'LNTBased')
+    if config.only_test is None:
+        test_modules_path = base_modules_path
+    elif config.only_test.startswith('LNTBased'):
+        test_modules_path = os.path.join(config.test_suite_root,
+                                         config.only_test)
+    else:
+        return
+
+    # We follow links here because we want to support the ability to keep
+    # various "suites" of LNTBased tests in separate repositories, allowing
+    # users to check them out elsewhere and link them into their LLVM
+    # test-suite source tree.
+    for dirpath, dirnames, filenames in os.walk(test_modules_path,
+                                                followlinks=True):
+        # Ignore the example tests, unless requested.
+        if not config.include_test_examples and 'Examples' in dirnames:
+            dirnames.remove('Examples')
+
+        # Check if this directory defines a test module.
+        if 'TestModule' not in filenames:
+            continue
+
+        # If so, don't traverse any lower.
+        del dirnames[:]
+
+        # Add to the list of test modules.
+        assert dirpath.startswith(base_modules_path + '/')
+        yield dirpath[len(base_modules_path) + 1:]
+
+# FIXME: Support duplicate logfiles to global directory.
+def execute_test_modules(test_log, test_modules, test_module_variables,
+                         basedir, config):
+    # For now, we don't execute these in parallel, but we do forward the
+    # parallel build options to the test.
+    test_modules.sort()
+
+    print >>sys.stderr, '%s: executing test modules' % (timestamp(),)
+    results = []
+    for name in test_modules:
+        # First, load the test module file.
+        locals = globals = {}
+        test_path = os.path.join(config.test_suite_root, 'LNTBased', name)
+        test_obj_path = os.path.join(basedir, 'LNTBased', name)
+        module_path = os.path.join(test_path, 'TestModule')
+        module_file = open(module_path)
+        try:
+            exec module_file in locals, globals
+        except:
+            info = traceback.format_exc()
+            fatal("unable to import test module: %r\n%s" % (
+                module_path, info))
+
+        # Look up and instantiate the test class.
+        test_class = globals.get('test_class')
+        if test_class is None:
+            fatal("no 'test_class' global in imported test module: %r" % (
+                module_path,))
+        try:
+            test_instance = test_class()
+        except:
+            fatal("unable to instantiate test class for: %r" % module_path)
+
+        if not isinstance(test_instance, TestModule):
+            fatal("invalid test class (expected lnt.tests.nt.TestModule "
+                  "subclass) for: %r" % module_path)
+
+        # Create the per-test variables, and ensure the output directory
+        # exists.
+        variables = test_module_variables.copy()
+        variables['MODULENAME'] = name
+        variables['SRCROOT'] = test_path
+        variables['OBJROOT'] = test_obj_path
+        mkdir_p(test_obj_path)
+
+        # Execute the tests.
+        try:
+            test_samples = test_instance._execute_test(test_log, variables)
+        except:
+            info = traceback.format_exc()
+            fatal("exception executing tests for: %r\n%s" % (
+                module_path, info))
+
+        # Check that the test samples are in the expected format.
+        is_ok = True
+        try:
+            test_samples = list(test_samples)
+            for item in test_samples:
+                if not isinstance(item, lnt.testing.TestSamples):
+                    is_ok = False
+                    break
+        except:
+            is_ok = False
+        if not is_ok:
+            fatal("test module did not return samples list: %r" % (
+                module_path,))
+
+        results.append((name, test_samples))
+
+    return results
+
+def compute_test_module_variables(make_variables, config):
+    # Set the test module options, which we try to restrict to a tighter
+    # subset than what we pass to the LNT makefiles.
+    test_module_variables = {
+        'CC': make_variables['TARGET_LLVMGCC'],
+        'CXX': make_variables['TARGET_LLVMGXX'],
+        'CFLAGS': (make_variables['TARGET_FLAGS'] + ' ' +
+                   make_variables['OPTFLAGS']),
+        'CXXFLAGS': (make_variables['TARGET_FLAGS'] + ' ' +
+                     make_variables['OPTFLAGS'])}
+
+    # Add the remote execution variables.
+    if config.remote:
+        test_module_variables['REMOTE_HOST'] = make_variables['REMOTE_HOST']
+        test_module_variables['REMOTE_USER'] = make_variables['REMOTE_USER']
+        test_module_variables['REMOTE_PORT'] = make_variables['REMOTE_PORT']
+        test_module_variables['REMOTE_CLIENT'] = make_variables['REMOTE_CLIENT']
+
+    # Add miscellaneous optional variables.
+    if 'LD_ENV_OVERRIDES' in make_variables:
+        value = make_variables['LD_ENV_OVERRIDES']
+        assert value.startswith('env ')
+        test_module_variables['LINK_ENVIRONMENT_OVERRIDES'] = value[4:]
+
+    # This isn't possible currently, just here to mark what the option
+    # variable would be called.
+    if 'COMPILE_ENVIRONMENT_OVERRIDES' in make_variables:
+        test_module_variables['COMPILE_ENVIRONMENT_OVERRIDES'] = \
+            make_variables['COMPILE_ENVIRONMENT_OVERRIDES']
+
+    if 'EXECUTION_ENVIRONMENT_OVERRIDES' in make_variables:
+        test_module_variables['EXECUTION_ENVIRONMENT_OVERRIDES'] = \
+            make_variables['EXECUTION_ENVIRONMENT_OVERRIDES']
+
+    # We pass the test execution values as variables too, though they might be
+    # better passed as actual arguments.
+    test_module_variables['THREADS'] = config.threads
+    test_module_variables['BUILD_THREADS'] = config.build_threads or \
+        config.threads
+    return test_module_variables
+
+def run_test(nick_prefix, iteration, config):
+    print >>sys.stderr, "%s: checking source versions" % (
+        timestamp(),)
+
+    test_suite_source_version = get_source_version(config.test_suite_root)
+
+    # Compute the make variables.
+    make_variables, public_make_variables = config.compute_run_make_variables()
+
+    # Compute the test module variables, which are a restricted subset of the
+    # make variables.
+    test_module_variables = compute_test_module_variables(make_variables,
+                                                          config)
+
+    # Scan for LNT-based test modules.
+    print >>sys.stderr, "%s: scanning for LNT-based test modules" % (
+        timestamp(),)
+    test_modules = list(scan_for_test_modules(config))
+    print >>sys.stderr, "%s: found %d LNT-based test modules" % (
+        timestamp(), len(test_modules))
+
+    nick = nick_prefix
+    if config.auto_name:
+        # Construct the nickname from a few key parameters.
+        cc_info = config.cc_info
+        cc_nick = '%s_%s' % (cc_info.get('cc_name'), cc_info.get('cc_build'))
+        nick += "__%s__%s" % (cc_nick,
+                              cc_info.get('cc_target').split('-')[0])
+    print >>sys.stderr, "%s: using nickname: %r" % (timestamp(), nick)
+
+    basedir = prepare_build_dir(config, iteration)
+
+    # FIXME: Auto-remove old test directories in the source directory (which
+    # cause make to have horrible fits).
+
+    start_time = timestamp()
+    print >>sys.stderr, '%s: starting test in %r' % (start_time, basedir)
+
+    # Configure the test suite.
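+    # (Configure is re-run when requested explicitly or when the build dir
+    # has no Makefile.config yet.)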
+    if config.run_configure or not os.path.exists(os.path.join(
+            basedir, 'Makefile.config')):
+        configure_test_suite(config, iteration)
+
+    # If running with --only-test, create any dirs which might be missing and
+    # copy Makefiles into place.
+    if config.only_test is not None and not config.only_test.startswith("LNTBased"):
+        copy_missing_makefiles(config, basedir)
+
+    # If running without LLVM, make sure tools are up to date.
+    if config.without_llvm:
+        update_tools(make_variables, config, iteration)
+
+    # Always blow away any existing report.
+    build_report_path = config.build_report_path(iteration)
+    if os.path.exists(build_report_path):
+        os.remove(build_report_path)
+
+    # Execute the tests.
+    test_log = open(config.test_log_path(iteration), 'w')
+
+    # Run the make driven tests if needed.
+    run_nightly_test = (config.only_test is None or
+                        not config.only_test.startswith("LNTBased"))
+    if run_nightly_test:
+        execute_nt_tests(test_log, make_variables, basedir, config)
+
+    # Run the extension test modules, if needed.
+    test_module_results = execute_test_modules(test_log, test_modules,
+                                               test_module_variables, basedir,
+                                               config)
+    test_log.close()
+
+    end_time = timestamp()
+
+    # Load the nightly test samples.
+    if config.test_style == "simple":
+        test_namespace = 'nts'
+    else:
+        test_namespace = 'nightlytest'
+    if run_nightly_test:
+        print >>sys.stderr, '%s: loading nightly test data...' % timestamp()
+        # If the nightly test went screwy, it won't have produced a report.
+        if not os.path.exists(build_report_path):
+            fatal('nightly test failed, no report generated')
+
+        test_samples = load_nt_report_file(build_report_path, config)
+    else:
+        test_samples = []
+
+    # Merge in the test samples from all of the test modules.
+    existing_tests = set(s.name for s in test_samples)
+    for module, results in test_module_results:
+        for s in results:
+            if s.name in existing_tests:
+                fatal("test module %r added duplicate test: %r" % (
+                    module, s.name))
+            existing_tests.add(s.name)
+        test_samples.extend(results)
+
+    print >>sys.stderr, '%s: capturing machine information' % (timestamp(),)
+    # Collect the machine and run info.
+    #
+    # FIXME: Import full range of data that the Clang tests are using?
+    machine_info = {}
+    machine_info['hardware'] = capture(["uname", "-m"],
+                                       include_stderr=True).strip()
+    machine_info['os'] = capture(["uname", "-sr"],
+                                 include_stderr=True).strip()
+    if config.cc_reference is not None:
+        machine_info['gcc_version'] = capture(
+            [config.cc_reference, '--version'],
+            include_stderr=True).split('\n')[0]
+
+    # FIXME: We aren't getting the LLCBETA options.
+    run_info = {}
+    run_info['tag'] = test_namespace
+    run_info.update(config.cc_info)
+
+    # Capture sw_vers if this looks like Darwin.
+    if 'Darwin' in machine_info['os']:
+        run_info['sw_vers'] = capture(['sw_vers'], include_stderr=True).strip()
+
+    # Query remote properties if in use.
+    if config.remote:
+        remote_args = [config.remote_client,
+                       "-l", config.remote_user,
+                       "-p", str(config.remote_port),
+                       config.remote_host]
+        run_info['remote_uname'] = capture(remote_args + ["uname", "-a"],
+                                           include_stderr=True).strip()
+
+        # Capture sw_vers if this looks like Darwin.
+        if 'Darwin' in run_info['remote_uname']:
+            run_info['remote_sw_vers'] = capture(remote_args + ["sw_vers"],
+                                                 include_stderr=True).strip()
+
+    # Query qemu user mode properties if in use.
+    if config.qemu_user_mode:
+        run_info['qemu_user_mode'] = config.qemu_user_mode_command
+
+    # Add machine dependent info.
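+    # Depending on use_machdep_info, the uname/name fields below are attached
+    # either to the machine info or to the run info.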
+    if config.use_machdep_info:
+        machdep_info = machine_info
+    else:
+        machdep_info = run_info
+
+    machdep_info['uname'] = capture(["uname", "-a"],
+                                    include_stderr=True).strip()
+    machdep_info['name'] = capture(["uname", "-n"],
+                                   include_stderr=True).strip()
+
+    # FIXME: Hack, use a better method of getting versions. Ideally, from the
+    # binaries so we are more likely to be accurate.
+    if config.llvm_source_version is not None:
+        run_info['llvm_revision'] = config.llvm_source_version
+    run_info['test_suite_revision'] = test_suite_source_version
+    run_info.update(public_make_variables)
+
+    # Set the run order from the user, if given.
+    if config.run_order is not None:
+        run_info['run_order'] = config.run_order
+    else:
+        # Otherwise, use the inferred run order from the compiler.
+        run_info['run_order'] = config.cc_info['inferred_run_order']
+
+    # Add any user specified parameters.
+    for target, params in ((machine_info, config.machine_parameters),
+                           (run_info, config.run_parameters)):
+        for entry in params:
+            if '=' not in entry:
+                name, value = entry, ''
+            else:
+                name, value = entry.split('=', 1)
+            if name in target:
+                warning("user parameter %r overwrote existing value: %r" % (
+                    name, target.get(name)))
+            target[name] = value
+
+    # Generate the test report.
+    lnt_report_path = config.report_path(iteration)
+    print >>sys.stderr, '%s: generating report: %r' % (timestamp(),
+                                                       lnt_report_path)
+    machine = lnt.testing.Machine(nick, machine_info)
+    run = lnt.testing.Run(start_time, end_time, info=run_info)
+
+    report = lnt.testing.Report(machine, run, test_samples)
+    lnt_report_file = open(lnt_report_path, 'w')
+    print >>lnt_report_file, report.render()
+    lnt_report_file.close()
+
+    return report
+
+###
+
+def _construct_report_path(basedir, only_test, test_style, file_type="csv"):
+    """Get the full path to report files in the sandbox."""
+    report_path = os.path.join(basedir)
+    if only_test is not None:
+        report_path = os.path.join(report_path, only_test)
+    report_path = os.path.join(report_path,
+                               ('report.%s.' % test_style) + file_type)
+    return report_path
+
+def rerun_test(config, name, num_times):
+    """Take the test at name, and rerun it num_times with the previous
+    settings stored in config.
+    """
+    # Extend the old log file.
+    logfile = open(config.test_log_path(None), 'a')
+
+    # Grab the real test name instead of the LNT benchmark URL.
+    real_name = TEST_TO_NAME["nts." + name]
+
+    relative_test_path = os.path.dirname(real_name)
+    test_name = os.path.basename(real_name)
+
+    test_full_path = os.path.join(
+        config.report_dir, relative_test_path)
+
+    assert os.path.exists(test_full_path), \
+        "Previous test directory not there? " + test_full_path
+
+    results = []
+    for _ in xrange(0, num_times):
+        test_results = _execute_test_again(config,
+                                           test_name,
+                                           test_full_path,
+                                           relative_test_path,
+                                           logfile)
+        results.extend(test_results)
+
+    # Check we got an exec and status from each run.
+    assert len(results) >= num_times, \
+        "Did not get all the runs? " + str(results)
+
+    logfile.close()
+    return results
+
+
+def _prepare_testsuite_for_rerun(test_name, test_full_path, config):
+    """Rerun step 1: wipe out old files to get ready for the rerun."""
+    output = os.path.join(test_full_path, "Output/")
+    test_path_prefix = output + test_name + "."
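+    # For example, with TEST=simple this removes Output/<test>.out-simple and
+    # the matching Output/<test>.*.time/.txt/.csv accounting files, so the
+    # rerun regenerates them from scratch.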
+    os.remove(test_path_prefix + "out-" + config.test_style)
+
+    # Remove all the test-suite accounting files for this benchmark.
+    to_go = glob.glob(test_path_prefix + "*.time")
+    to_go.extend(glob.glob(test_path_prefix + "*.txt"))
+    to_go.extend(glob.glob(test_path_prefix + "*.csv"))
+
+    assert len(to_go) >= 1, "Missing at least one accounting file."
+    for path in to_go:
+        print "Removing:", path
+        os.remove(path)
+
+
+def _execute_test_again(config, test_name, test_path, test_relative_path,
+                        logfile):
+    """(Re)Execute the benchmark of interest."""
+    _prepare_testsuite_for_rerun(test_name, test_path, config)
+
+    # Grab the old make invocation.
+    mk_vars, _ = config.compute_run_make_variables()
+    to_exec = ['make', '-k']
+    to_exec.extend('%s=%s' % (k, v) for k, v in mk_vars.items())
+
+    # We need to run the benchmark's makefile, not the global one.
+    if config.only_test is not None:
+        to_exec.extend(['-C', config.only_test])
+    else:
+        if test_relative_path:
+            to_exec.extend(['-C', test_relative_path])
+            config.rerun_test = test_relative_path
+
+    # The make target for this specific benchmark.
+    benchmark_report_target = "Output/" + test_name + \
+        "." + config.test_style + ".report.txt"
+    # Actual file system location of the target.
+    benchmark_report_path = os.path.join(config.build_dir(None),
+                                         test_path,
+                                         benchmark_report_target)
+    to_exec.append(benchmark_report_target)
+
+    returncode = execute_command(logfile, config.build_dir(None), to_exec,
+                                 config.report_dir)
+    assert returncode == 0, "Remake command failed."
+    assert os.path.exists(benchmark_report_path), \
+        "Missing generated report: " + benchmark_report_path
+
+    # Now we need to pull out the results into the CSV format LNT can read.
+    schema = os.path.join(config.test_suite_root,
+                          "TEST." + config.test_style + ".report")
+    result_path = os.path.join(config.build_dir(None),
+                               test_path, "Output",
+                               test_name + "." + config.test_style +
+                               ".report.csv")
+
+    gen_report_template = "{gen} -csv {schema} < {input} > {output}"
+    gen_cmd = gen_report_template.format(gen=config.generate_report_script,
+                                         schema=schema,
+                                         input=benchmark_report_path,
+                                         output=result_path)
+    bash_gen_cmd = ["/bin/bash", "-c", gen_cmd]
+
+    assert not os.path.exists(result_path), \
+        "Results should not exist yet: " + result_path
+    returncode = execute_command(logfile, config.build_dir(None),
+                                 bash_gen_cmd, config.report_dir)
+    assert returncode == 0, "Report generation command failed."
+    assert os.path.exists(result_path), "Missing results file."
+
+    results = load_nt_report_file(result_path, config)
+    assert len(results) > 0
+    return results
+
+def _unix_quote_args(s):
+    return map(pipes.quote, shlex.split(s))
+
+class NT_Makefile(NT_Buildsystem):
+    def __init__(self, timestamp, opts):
+        self.config = TestConfiguration(opts, timestamp)
+
+        # FIXME: We need to validate that there is no configured output in the
+        # test-suite directory, as that borks things.
+        prepare_report_dir(self.config)
+
+        # These notes are used by the regression tests to check if we've
+        # handled flags correctly.
+        note('TARGET_FLAGS: {}'.format(' '.join(self.config.target_flags)))
+        if self.config.qemu_user_mode:
+            note('QEMU_USER_MODE_COMMAND: {}'.format(
+                self.config.qemu_user_mode_command))
+
+    def run(self, nick, multisample=None, compile=True, test=True):
+        # Makefiles don't support separate compile and test steps - we always
+        # run both.
+ return run_test(nick, multisample, self.config) + + def rerun(self, name, num_reruns): + return rerun_test(self.config, name, num_reruns) + + def report_path(self): + return self.config.report_path(None)
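+
+# A rough sketch of how a driver is expected to use this class (illustrative
+# only; the surrounding option parsing is omitted):
+#
+#   bs = NT_Makefile(start_time, opts)   # opts: dict of parsed options
+#   report = bs.run(nick)                # configure, build, run, and report
+#   samples = bs.rerun(test_name, 4)     # re-collect samples for one test
+#   path = bs.report_path()              # where the generated report lives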