Index: packages/Python/lldbsuite/test/__init__.py
===================================================================
--- packages/Python/lldbsuite/test/__init__.py
+++ packages/Python/lldbsuite/test/__init__.py
@@ -3,4 +3,5 @@
 from __future__ import absolute_import
 
 from . import dotest
 
+run_suite = dotest.run_suite
Index: packages/Python/lldbsuite/test/api/check_public_api_headers/TestPublicAPIHeaders.py
===================================================================
--- packages/Python/lldbsuite/test/api/check_public_api_headers/TestPublicAPIHeaders.py
+++ packages/Python/lldbsuite/test/api/check_public_api_headers/TestPublicAPIHeaders.py
@@ -26,8 +26,8 @@
         """Test the SB API directory and make sure there's no unwanted stuff."""
 
         # Only proceed if this is an Apple OS, "x86_64", and local platform.
-        if not (self.platformIsDarwin() and self.getArchitecture() == "x86_64" and not lldb.test_remote):
-            self.skipTest("This test is only for LLDB.framework built 64-bit and !lldb.test_remote")
+        if not (self.platformIsDarwin() and self.getArchitecture() == "x86_64" and not configuration.test_remote):
+            self.skipTest("This test is only for LLDB.framework built 64-bit and !configuration.test_remote")
         if self.getArchitecture() == "i386":
             self.skipTest("LLDB is 64-bit and cannot be linked to 32-bit test program.")
Index: packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py
+++ packages/Python/lldbsuite/test/benchmarks/disassembly/TestDisassembly.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 def is_exe(fpath):
@@ -35,7 +36,7 @@
         self.function = 'Driver::MainLoop()'
         self.lldb_avg = None
         self.gdb_avg = None
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 5
Index: packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
+++ packages/Python/lldbsuite/test/benchmarks/disassembly/TestDoAttachThenDisassembly.py
@@ -9,6 +9,7 @@
 import os, sys
 import lldb
 from lldbsuite.test.lldbbench import *
+from lldbsuite.test import configuration
 
 class AttachThenDisassemblyBench(BenchBase):
 
@@ -16,11 +17,11 @@
 
     def setUp(self):
         BenchBase.setUp(self)
-        if lldb.bmExecutable:
-            self.exe = lldb.bmExecutable
+        if configuration.bmExecutable:
+            self.exe = configuration.bmExecutable
         else:
             self.exe = lldbtest_config.lldbExec
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 10
Index: packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py
+++ packages/Python/lldbsuite/test/benchmarks/disassembly/TestXcode41Vs42GDBDisassembly.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class XCode41Vs42GDBDisassembly(BenchBase):
@@ -20,7 +21,7 @@
         self.function = 'Driver::MainLoop()'
         self.gdb_41_avg = None
         self.gdb_42_avg = None
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 5
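Every benchmark in this patch makes the same substitution: the iteration count is no
longer an attribute injected into the lldb module but a plain module-level value in
lldbsuite.test.configuration, with a per-test fallback when '-y N' was not passed to
dotest.py. A minimal sketch of the consumer side (the class name and the default of 10
are illustrative, not taken from this patch):

    from lldbsuite.test import configuration
    from lldbsuite.test.lldbbench import BenchBase

    class ExampleBench(BenchBase):
        def setUp(self):
            BenchBase.setUp(self)
            # configuration.bmIterationCount stays at -1 unless the user
            # passed '-y N', so each benchmark picks its own fallback.
            self.count = configuration.bmIterationCount
            if self.count <= 0:
                self.count = 10  # illustrative default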
Index: packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
+++ packages/Python/lldbsuite/test/benchmarks/expression/TestExpressionCmd.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class ExpressionEvaluationCase(BenchBase):
@@ -16,7 +17,7 @@
         BenchBase.setUp(self)
         self.source = 'main.cpp'
         self.line_to_break = line_number(self.source, '// Set breakpoint here.')
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 25
Index: packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py
+++ packages/Python/lldbsuite/test/benchmarks/expression/TestRepeatedExprs.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class RepeatedExprsCase(BenchBase):
@@ -18,7 +19,7 @@
         self.line_to_break = line_number(self.source, '// Set breakpoint here.')
         self.lldb_avg = None
         self.gdb_avg = None
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 100
Index: packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py
+++ packages/Python/lldbsuite/test/benchmarks/frame_variable/TestFrameVariableResponse.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class FrameVariableResponseBench(BenchBase):
@@ -14,16 +15,16 @@
 
     def setUp(self):
         BenchBase.setUp(self)
-        if lldb.bmExecutable:
-            self.exe = lldb.bmExecutable
+        if configuration.bmExecutable:
+            self.exe = configuration.bmExecutable
         else:
             self.exe = lldbtest_config.lldbExec
-        if lldb.bmBreakpointSpec:
-            self.break_spec = lldb.bmBreakpointSpec
+        if configuration.bmBreakpointSpec:
+            self.break_spec = configuration.bmBreakpointSpec
         else:
             self.break_spec = '-n main'
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 20
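TestFrameVariableResponse above, and TestStartupDelays and TestSteppingSpeed below,
repeat the same two fallbacks: bmExecutable defaults to the lldb binary under test and
bmBreakpointSpec defaults to '-n main'. If one wanted to state that logic once, a shared
helper could look like this (the helper is hypothetical and not part of this patch):

    from lldbsuite.test import configuration
    from lldbsuite.test import lldbtest_config

    def benchmark_target():
        # Fall back to the lldb under test and to '-n main' when the
        # '-e' / '-x' dotest.py options were not given.
        exe = configuration.bmExecutable or lldbtest_config.lldbExec
        break_spec = configuration.bmBreakpointSpec or '-n main'
        return exe, break_spec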
Index: packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py
+++ packages/Python/lldbsuite/test/benchmarks/startup/TestStartupDelays.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class StartupDelaysBench(BenchBase):
@@ -19,16 +20,16 @@
         self.stopwatch2 = Stopwatch()
         # Create self.stopwatch3 for measuring "run to breakpoint".
         self.stopwatch3 = Stopwatch()
-        if lldb.bmExecutable:
-            self.exe = lldb.bmExecutable
+        if configuration.bmExecutable:
+            self.exe = configuration.bmExecutable
         else:
             self.exe = lldbtest_config.lldbExec
-        if lldb.bmBreakpointSpec:
-            self.break_spec = lldb.bmBreakpointSpec
+        if configuration.bmBreakpointSpec:
+            self.break_spec = configuration.bmBreakpointSpec
         else:
             self.break_spec = '-n main'
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 30
Index: packages/Python/lldbsuite/test/benchmarks/stepping/TestRunHooksThenSteppings.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/stepping/TestRunHooksThenSteppings.py
+++ packages/Python/lldbsuite/test/benchmarks/stepping/TestRunHooksThenSteppings.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class RunHooksThenSteppingsBench(BenchBase):
@@ -14,7 +15,7 @@
 
     def setUp(self):
         BenchBase.setUp(self)
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 50
Index: packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py
+++ packages/Python/lldbsuite/test/benchmarks/stepping/TestSteppingSpeed.py
@@ -2,10 +2,9 @@
 
 from __future__ import print_function
 
-
-
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class SteppingSpeedBench(BenchBase):
@@ -14,16 +13,16 @@
 
     def setUp(self):
         BenchBase.setUp(self)
-        if lldb.bmExecutable:
-            self.exe = lldb.bmExecutable
+        if configuration.bmExecutable:
+            self.exe = configuration.bmExecutable
         else:
             self.exe = lldbtest_config.lldbExec
-        if lldb.bmBreakpointSpec:
-            self.break_spec = lldb.bmBreakpointSpec
+        if configuration.bmBreakpointSpec:
+            self.break_spec = configuration.bmBreakpointSpec
         else:
             self.break_spec = '-n main'
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 50
Index: packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
===================================================================
--- packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
+++ packages/Python/lldbsuite/test/benchmarks/turnaround/TestCompileRunToBreakpointTurnaround.py
@@ -6,6 +6,7 @@
 
 import os, sys
 import lldb
+from lldbsuite.test import configuration
 from lldbsuite.test.lldbbench import *
 
 class CompileRunToBreakpointBench(BenchBase):
@@ -17,7 +18,7 @@
         self.exe = lldbtest_config.lldbExec
         self.function = 'Driver::MainLoop()'
 
-        self.count = lldb.bmIterationCount
+        self.count = configuration.bmIterationCount
         if self.count <= 0:
             self.count = 3
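The new configuration module below carries over dotest.py's Darwin crash-report hook,
which compiles crashinfo.so on demand under a file lock, and only when the source is
newer than the dylib. Stripped of the LLDB specifics, the rebuild guard reduces to this
shape (standalone sketch; the function name and arguments are illustrative):

    import os
    import subprocess

    def rebuild_if_stale(src, dst, cmd):
        # Rebuild dst only when it is missing or older than its source --
        # the same mtime comparison configuration.py applies to crashinfo.so.
        if not os.path.isfile(dst) or os.path.getmtime(dst) < os.path.getmtime(src):
            if subprocess.call(cmd, shell=True) != 0 or not os.path.isfile(dst):
                raise Exception('command failed: "{}"'.format(cmd))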
+""" + +from __future__ import absolute_import +from __future__ import print_function + +# System modules +import os +import platform +import subprocess + + +# Third-party modules +import unittest2 + +# LLDB Modules +import lldbsuite + +def __setCrashInfoHook_Mac(text): + from . import crashinfo + crashinfo.setCrashReporterDescription(text) + +def __setupCrashInfoHook(): + if platform.system() == "Darwin": + from . import lock + test_dir = os.environ['LLDB_TEST'] + if not test_dir or not os.path.exists(test_dir): + return + dylib_lock = os.path.join(test_dir,"crashinfo.lock") + dylib_src = os.path.join(test_dir,"crashinfo.c") + dylib_dst = os.path.join(test_dir,"crashinfo.so") + try: + compile_lock = lock.Lock(dylib_lock) + compile_lock.acquire() + if not os.path.isfile(dylib_dst) or os.path.getmtime(dylib_dst) < os.path.getmtime(dylib_src): + # we need to compile + cmd = "SDKROOT= xcrun clang %s -o %s -framework Python -Xlinker -dylib -iframework /System/Library/Frameworks/ -Xlinker -F /System/Library/Frameworks/" % (dylib_src,dylib_dst) + if subprocess.call(cmd,shell=True) != 0 or not os.path.isfile(dylib_dst): + raise Exception('command failed: "{}"'.format(cmd)) + finally: + compile_lock.release() + del compile_lock + + setCrashInfoHook = setCrashInfoHook_Mac + + else: + pass + +# The test suite. +suite = unittest2.TestSuite() + +# By default, benchmarks tests are not run. +just_do_benchmarks_test = False + +dont_do_dsym_test = False +dont_do_dwarf_test = False +dont_do_dwo_test = False + +# The blacklist is optional (-b blacklistFile) and allows a central place to skip +# testclass's and/or testclass.testmethod's. +blacklist = None + +# The dictionary as a result of sourcing blacklistFile. +blacklistConfig = {} + +# The list of categories we said we care about +categoriesList = None +# set to true if we are going to use categories for cherry-picking test cases +useCategories = False +# Categories we want to skip +skipCategories = [] +# use this to track per-category failures +failuresPerCategory = {} + +# The path to LLDB.framework is optional. +lldbFrameworkPath = None + +# The config file is optional. +configFile = None + +# Test suite repeat count. Can be overwritten with '-# count'. +count = 1 + +# The dictionary as a result of sourcing configFile. +config = {} +# The pre_flight and post_flight functions come from reading a config file. +pre_flight = None +post_flight = None +# So do the lldbtest_remote_sandbox and lldbtest_remote_shell_template variables. +test_remote = False +lldbtest_remote_sandbox = None +lldbtest_remote_shell_template = None + +# The 'archs' and 'compilers' can be specified via either command line or configFile, +# with the command line overriding the configFile. The corresponding options can be +# specified more than once. For example, "-A x86_64 -A i386" => archs=['x86_64', 'i386'] +# and "-C gcc -C clang" => compilers=['gcc', 'clang']. +archs = None # Must be initialized after option parsing +compilers = None # Must be initialized after option parsing + +# The arch might dictate some specific CFLAGS to be passed to the toolchain to build +# the inferior programs. The global variable cflags_extras provides a hook to do +# just that. +cflags_extras = '' + +# Dump the Python sys.path variable. Use '-D' to dump sys.path. +dumpSysPath = False + +# Full path of the benchmark executable, as specified by the '-e' option. +bmExecutable = None +# The breakpoint specification of bmExecutable, as specified by the '-x' option. 
+bmBreakpointSpec = None +# The benchmark iteration count, as specified by the '-y' option. +bmIterationCount = -1 + +# By default, don't exclude any directories. Use '-X' to add one excluded directory. +excluded = set(['.svn', '.git']) + +# By default, failfast is False. Use '-F' to overwrite it. +failfast = False + +# The filters (testclass.testmethod) used to admit tests into our test suite. +filters = [] + +# The runhooks is a list of lldb commands specifically for the debugger. +# Use '-k' to specify a runhook. +runHooks = [] + +# If '-g' is specified, the filterspec is not exclusive. If a test module does +# not contain testclass.testmethod which matches the filterspec, the whole test +# module is still admitted into our test suite. fs4all flag defaults to True. +fs4all = True + +# Ignore the build search path relative to this script to locate the lldb.py module. +ignore = False + +# By default, we do not skip build and cleanup. Use '-S' option to override. +skip_build_and_cleanup = False + +# By default, we skip long running test case. Use '-l' option to override. +skip_long_running_test = True + +# By default, we print the build dir, lldb version, and svn info. Use '-n' option to +# turn it off. +noHeaders = False + +# Parsable mode silences headers, and any other output this script might generate, and instead +# prints machine-readable output similar to what clang tests produce. +parsable = False + +# The regular expression pattern to match against eligible filenames as our test cases. +regexp = None + +# By default, tests are executed in place and cleanups are performed afterwards. +# Use '-r dir' option to relocate the tests and their intermediate files to a +# different directory and to forgo any cleanups. The directory specified must +# not exist yet. +rdir = None + +# By default, recorded session info for errored/failed test are dumped into its +# own file under a session directory named after the timestamp of the test suite +# run. Use '-s session-dir-name' to specify a specific dir name. +sdir_name = None + +# Set this flag if there is any session info dumped during the test run. +sdir_has_content = False + +# svn_info stores the output from 'svn info lldb.base.dir'. +svn_info = '' + +# svn_silent means do not try to obtain svn status +svn_silent = True + +# Default verbosity is 0. +verbose = 1 + +# Set to True only if verbose is 0 and LLDB trace mode is off. +progress_bar = False + +# By default, search from the script directory. +# We can't use sys.path[0] to determine the script directory +# because it doesn't work under a debugger +testdirs = [ os.path.dirname(os.path.realpath(__file__)) ] + +# Separator string. +separator = '-' * 70 + +failed = False + +# LLDB Remote platform setting +lldb_platform_name = None +lldb_platform_url = None +lldb_platform_working_dir = None + +# Parallel execution settings +is_inferior_test_runner = False +multiprocess_test_subdir = None +num_threads = None +output_on_success = False +no_multiprocess_test_runner = False +test_runner_name = None + +# Test results handling globals +results_filename = None +results_port = None +results_file_object = None +results_formatter_name = None +results_formatter_object = None +results_formatter_options = None +test_result = None + +# The names of all tests. Used to assert we don't have two tests with the same base name. 
+all_tests = set() + +# safe default +setCrashInfoHook = lambda x : None +__setupCrashInfoHook() Index: packages/Python/lldbsuite/test/dotest.py =================================================================== --- packages/Python/lldbsuite/test/dotest.py +++ packages/Python/lldbsuite/test/dotest.py @@ -40,6 +40,7 @@ # LLDB Modules import lldbsuite +from . import configuration from . import dotest_args from . import lldbtest_config from . import test_categories @@ -82,174 +83,9 @@ # # Global variables: # - -# The test suite. -suite = unittest2.TestSuite() - -# By default, benchmarks tests are not run. -just_do_benchmarks_test = False - -dont_do_dsym_test = False -dont_do_dwarf_test = False -dont_do_dwo_test = False - -# The blacklist is optional (-b blacklistFile) and allows a central place to skip -# testclass's and/or testclass.testmethod's. -blacklist = None - -# The dictionary as a result of sourcing blacklistFile. -blacklistConfig = {} - -# The list of categories we said we care about -categoriesList = None -# set to true if we are going to use categories for cherry-picking test cases -useCategories = False -# Categories we want to skip -skipCategories = [] -# use this to track per-category failures -failuresPerCategory = {} - -# The path to LLDB.framework is optional. -lldbFrameworkPath = None - -# The config file is optional. -configFile = None - -# Test suite repeat count. Can be overwritten with '-# count'. -count = 1 - -# The dictionary as a result of sourcing configFile. -config = {} -# The pre_flight and post_flight functions come from reading a config file. -pre_flight = None -post_flight = None -# So do the lldbtest_remote_sandbox and lldbtest_remote_shell_template variables. -lldbtest_remote_sandbox = None -lldbtest_remote_shell_template = None - -# The 'archs' and 'compilers' can be specified via either command line or configFile, -# with the command line overriding the configFile. The corresponding options can be -# specified more than once. For example, "-A x86_64 -A i386" => archs=['x86_64', 'i386'] -# and "-C gcc -C clang" => compilers=['gcc', 'clang']. -archs = None # Must be initialized after option parsing -compilers = None # Must be initialized after option parsing - -# The arch might dictate some specific CFLAGS to be passed to the toolchain to build -# the inferior programs. The global variable cflags_extras provides a hook to do -# just that. -cflags_extras = '' - -# Dump the Python sys.path variable. Use '-D' to dump sys.path. -dumpSysPath = False - -# Full path of the benchmark executable, as specified by the '-e' option. -bmExecutable = None -# The breakpoint specification of bmExecutable, as specified by the '-x' option. -bmBreakpointSpec = None -# The benchmark iteration count, as specified by the '-y' option. -bmIterationCount = -1 - -# By default, don't exclude any directories. Use '-X' to add one excluded directory. -excluded = set(['.svn', '.git']) - -# By default, failfast is False. Use '-F' to overwrite it. -failfast = False - -# The filters (testclass.testmethod) used to admit tests into our test suite. -filters = [] - -# The runhooks is a list of lldb commands specifically for the debugger. -# Use '-k' to specify a runhook. -runHooks = [] - -# If '-g' is specified, the filterspec is not exclusive. If a test module does -# not contain testclass.testmethod which matches the filterspec, the whole test -# module is still admitted into our test suite. fs4all flag defaults to True. 
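The payoff of moving the globals shows up throughout the dotest.py hunks below:
rebinding an attribute of an imported module requires no 'global' declaration, so the
long 'global ...' blocks disappear while every importer still observes the new values.
The difference in miniature (standalone sketch; 'settings' stands in for
lldbsuite.test.configuration):

    # settings.py
    verbose = 0

    # client.py
    import settings

    def enable_verbose():
        # Plain attribute assignment on the module object; no 'global'
        # statement needed, and other importers of settings see the change.
        settings.verbose = 2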
Index: packages/Python/lldbsuite/test/dotest.py
===================================================================
--- packages/Python/lldbsuite/test/dotest.py
+++ packages/Python/lldbsuite/test/dotest.py
@@ -40,6 +40,7 @@
 
 # LLDB Modules
 import lldbsuite
+from . import configuration
 from . import dotest_args
 from . import lldbtest_config
 from . import test_categories
@@ -82,174 +83,9 @@
 #
 # Global variables:
 #
-
-# The test suite.
-suite = unittest2.TestSuite()
-
-# By default, benchmarks tests are not run.
-just_do_benchmarks_test = False
-
-dont_do_dsym_test = False
-dont_do_dwarf_test = False
-dont_do_dwo_test = False
-
-# The blacklist is optional (-b blacklistFile) and allows a central place to skip
-# testclass's and/or testclass.testmethod's.
-blacklist = None
-
-# The dictionary as a result of sourcing blacklistFile.
-blacklistConfig = {}
-
-# The list of categories we said we care about
-categoriesList = None
-# set to true if we are going to use categories for cherry-picking test cases
-useCategories = False
-# Categories we want to skip
-skipCategories = []
-# use this to track per-category failures
-failuresPerCategory = {}
-
-# The path to LLDB.framework is optional.
-lldbFrameworkPath = None
-
-# The config file is optional.
-configFile = None
-
-# Test suite repeat count. Can be overwritten with '-# count'.
-count = 1
-
-# The dictionary as a result of sourcing configFile.
-config = {}
-# The pre_flight and post_flight functions come from reading a config file.
-pre_flight = None
-post_flight = None
-# So do the lldbtest_remote_sandbox and lldbtest_remote_shell_template variables.
-lldbtest_remote_sandbox = None
-lldbtest_remote_shell_template = None
-
-# The 'archs' and 'compilers' can be specified via either command line or configFile,
-# with the command line overriding the configFile. The corresponding options can be
-# specified more than once. For example, "-A x86_64 -A i386" => archs=['x86_64', 'i386']
-# and "-C gcc -C clang" => compilers=['gcc', 'clang'].
-archs = None # Must be initialized after option parsing
-compilers = None # Must be initialized after option parsing
-
-# The arch might dictate some specific CFLAGS to be passed to the toolchain to build
-# the inferior programs. The global variable cflags_extras provides a hook to do
-# just that.
-cflags_extras = ''
-
-# Dump the Python sys.path variable. Use '-D' to dump sys.path.
-dumpSysPath = False
-
-# Full path of the benchmark executable, as specified by the '-e' option.
-bmExecutable = None
-# The breakpoint specification of bmExecutable, as specified by the '-x' option.
-bmBreakpointSpec = None
-# The benchmark iteration count, as specified by the '-y' option.
-bmIterationCount = -1
-
-# By default, don't exclude any directories. Use '-X' to add one excluded directory.
-excluded = set(['.svn', '.git'])
-
-# By default, failfast is False. Use '-F' to overwrite it.
-failfast = False
-
-# The filters (testclass.testmethod) used to admit tests into our test suite.
-filters = []
-
-# The runhooks is a list of lldb commands specifically for the debugger.
-# Use '-k' to specify a runhook.
-runHooks = []
-
-# If '-g' is specified, the filterspec is not exclusive. If a test module does
-# not contain testclass.testmethod which matches the filterspec, the whole test
-# module is still admitted into our test suite. fs4all flag defaults to True.
-fs4all = True
-
-# Ignore the build search path relative to this script to locate the lldb.py module.
-ignore = False
-
-# By default, we do not skip build and cleanup. Use '-S' option to override.
-skip_build_and_cleanup = False
-
-# By default, we skip long running test case. Use '-l' option to override.
-skip_long_running_test = True
-
-# By default, we print the build dir, lldb version, and svn info. Use '-n' option to
-# turn it off.
-noHeaders = False
-
-# Parsable mode silences headers, and any other output this script might generate, and instead
-# prints machine-readable output similar to what clang tests produce.
-parsable = False
-
-# The regular expression pattern to match against eligible filenames as our test cases.
-regexp = None
-
-# By default, tests are executed in place and cleanups are performed afterwards.
-# Use '-r dir' option to relocate the tests and their intermediate files to a
-# different directory and to forgo any cleanups. The directory specified must
-# not exist yet.
-rdir = None
-
-# By default, recorded session info for errored/failed test are dumped into its
-# own file under a session directory named after the timestamp of the test suite
-# run. Use '-s session-dir-name' to specify a specific dir name.
-sdir_name = None
-
-# Set this flag if there is any session info dumped during the test run.
-sdir_has_content = False
-
-# svn_info stores the output from 'svn info lldb.base.dir'.
-svn_info = ''
-
-# svn_silent means do not try to obtain svn status
-svn_silent = True
-
-# Default verbosity is 0.
-verbose = 1
-
-# Set to True only if verbose is 0 and LLDB trace mode is off.
-progress_bar = False
-
-# By default, search from the script directory.
-# We can't use sys.path[0] to determine the script directory
-# because it doesn't work under a debugger
-testdirs = [ os.path.dirname(os.path.realpath(__file__)) ]
-
-# Separator string.
-separator = '-' * 70
-
-failed = False
-
-# LLDB Remote platform setting
-lldb_platform_name = None
-lldb_platform_url = None
-lldb_platform_working_dir = None
-
-# Parallel execution settings
-is_inferior_test_runner = False
-multiprocess_test_subdir = None
-num_threads = None
-output_on_success = False
-no_multiprocess_test_runner = False
-test_runner_name = None
-
-# Test results handling globals
-results_filename = None
-results_port = None
-results_file_object = None
-results_formatter_name = None
-results_formatter_object = None
-results_formatter_options = None
-
-# The names of all tests. Used to assert we don't have two tests with the same base name.
-all_tests = set()
-
 def usage(parser):
     parser.print_help()
-    if verbose > 0:
+    if configuration.verbose > 0:
         print("""
 Examples:
@@ -361,63 +197,12 @@
 """)
     sys.exit(0)
 
-
-def setCrashInfoHook_Mac(text):
-    from . import crashinfo
-    crashinfo.setCrashReporterDescription(text)
-
-# implement this in some suitable way for your platform, and then bind it
-# to setCrashInfoHook
-def setCrashInfoHook_NonMac(text):
-    pass
-
-setCrashInfoHook = None
-
-def deleteCrashInfoDylib(dylib_path):
-    try:
-        # Need to modify this to handle multiple tests running at the same time. If we move this
-        # to the test's real dir, all should be we run sequentially within a test directory.
-        # os.remove(dylib_path)
-        None
-    finally:
-        pass
-
-def setupCrashInfoHook():
-    global setCrashInfoHook
-    setCrashInfoHook = setCrashInfoHook_NonMac # safe default
-    if platform.system() == "Darwin":
-        from . import lock
-        test_dir = os.environ['LLDB_TEST']
-        if not test_dir or not os.path.exists(test_dir):
-            return
-        dylib_lock = os.path.join(test_dir,"crashinfo.lock")
-        dylib_src = os.path.join(test_dir,"crashinfo.c")
-        dylib_dst = os.path.join(test_dir,"crashinfo.so")
-        try:
-            compile_lock = lock.Lock(dylib_lock)
-            compile_lock.acquire()
-            if not os.path.isfile(dylib_dst) or os.path.getmtime(dylib_dst) < os.path.getmtime(dylib_src):
-                # we need to compile
-                cmd = "SDKROOT= xcrun clang %s -o %s -framework Python -Xlinker -dylib -iframework /System/Library/Frameworks/ -Xlinker -F /System/Library/Frameworks/" % (dylib_src,dylib_dst)
-                if subprocess.call(cmd,shell=True) != 0 or not os.path.isfile(dylib_dst):
-                    raise Exception('command failed: "{}"'.format(cmd))
-        finally:
-            compile_lock.release()
-            del compile_lock
-
-        setCrashInfoHook = setCrashInfoHook_Mac
-
-    else:
-        pass
-
 def shouldSkipBecauseOfCategories(test_categories):
-    global useCategories, categoriesList, skipCategories
-
-    if useCategories:
-        if len(test_categories) == 0 or len(categoriesList & set(test_categories)) == 0:
+    if configuration.useCategories:
+        if len(test_categories) == 0 or len(configuration.categoriesList & set(test_categories)) == 0:
             return True
 
-    for category in skipCategories:
+    for category in configuration.skipCategories:
         if category in test_categories:
             return True
@@ -429,56 +214,6 @@
     '-h/--help as the first option prints out usage info and exit the program.
     """
 
-    global just_do_benchmarks_test
-    global dont_do_dsym_test
-    global dont_do_dwarf_test
-    global dont_do_dwo_test
-    global blacklist
-    global blacklistConfig
-    global categoriesList
-    global validCategories
-    global useCategories
-    global skipCategories
-    global lldbFrameworkPath
-    global configFile
-    global archs
-    global compilers
-    global count
-    global dumpSysPath
-    global bmExecutable
-    global bmBreakpointSpec
-    global bmIterationCount
-    global failfast
-    global filters
-    global fs4all
-    global ignore
-    global progress_bar
-    global runHooks
-    global skip_build_and_cleanup
-    global skip_long_running_test
-    global noHeaders
-    global parsable
-    global regexp
-    global rdir
-    global sdir_name
-    global svn_silent
-    global verbose
-    global testdirs
-    global lldb_platform_name
-    global lldb_platform_url
-    global lldb_platform_working_dir
-    global setCrashInfoHook
-    global is_inferior_test_runner
-    global multiprocess_test_subdir
-    global num_threads
-    global output_on_success
-    global no_multiprocess_test_runner
-    global test_runner_name
-    global results_filename
-    global results_formatter_name
-    global results_formatter_options
-    global results_port
-
     do_help = False
 
     platform_system = platform.system()
@@ -511,17 +246,17 @@
         do_help = True
 
     if args.compilers:
-        compilers = args.compilers
+        configuration.compilers = args.compilers
     else:
-        # Use a compiler appropriate appropriate for the Apple SDK if one was specified
+        # Use a compiler appropriate for the Apple SDK if one was specified
         if platform_system == 'Darwin' and args.apple_sdk:
-            compilers = [seven.get_command_output('xcrun -sdk "%s" -find clang 2> /dev/null' % (args.apple_sdk))]
+            configuration.compilers = [seven.get_command_output('xcrun -sdk "%s" -find clang 2> /dev/null' % (args.apple_sdk))]
        else:
             # 'clang' on ubuntu 14.04 is 3.4 so we try clang-3.5 first
             candidateCompilers = ['clang-3.5', 'clang', 'gcc']
             for candidate in candidateCompilers:
                 if which(candidate):
-                    compilers = [candidate]
+                    configuration.compilers = [candidate]
                     break
 
     if args.channels:
@@ -535,26 +270,26 @@
             os.environ['SDKROOT'] = seven.get_command_output('xcrun --sdk "%s" --show-sdk-path 2> /dev/null' % (args.apple_sdk))
 
     if args.archs:
-        archs = args.archs
-        for arch in archs:
+        configuration.archs = args.archs
+        for arch in configuration.archs:
             if arch.startswith('arm') and platform_system == 'Darwin' and not args.apple_sdk:
                 os.environ['SDKROOT'] = seven.get_command_output('xcrun --sdk iphoneos.internal --show-sdk-path 2> /dev/null')
                 if not os.path.exists(os.environ['SDKROOT']):
                     os.environ['SDKROOT'] = seven.get_command_output('xcrun --sdk iphoneos --show-sdk-path 2> /dev/null')
     else:
-        archs = [platform_machine]
+        configuration.archs = [platform_machine]
 
     if args.categoriesList:
-        categoriesList = set(test_categories.validate(args.categoriesList, False))
-        useCategories = True
+        configuration.categoriesList = set(test_categories.validate(args.categoriesList, False))
+        configuration.useCategories = True
     else:
-        categoriesList = []
+        configuration.categoriesList = []
 
     if args.skipCategories:
-        skipCategories = test_categories.validate(args.skipCategories, False)
+        configuration.skipCategories = test_categories.validate(args.skipCategories, False)
 
     if args.D:
-        dumpSysPath = True
+        configuration.dumpSysPath = True
 
     if args.E:
         cflags_extras = args.E
@@ -562,11 +297,11 @@
 
     # argparse makes sure we have correct options
     if args.N == 'dwarf':
-        dont_do_dwarf_test = True
+        configuration.dont_do_dwarf_test = True
     elif args.N == 'dwo':
-        dont_do_dwo_test = True
+        configuration.dont_do_dwo_test = True
     elif args.N == 'dsym':
-        dont_do_dsym_test = True
+        configuration.dont_do_dsym_test = True
 
     if args.a or args.plus_a:
         print("Options '-a' and '+a' have been deprecated. Please use the test category\n"
@@ -579,7 +314,7 @@
         sys.exit(1)
 
     if args.plus_b:
-        just_do_benchmarks_test = True
+        configuration.just_do_benchmarks_test = True
 
     if args.b:
@@ -589,15 +324,15 @@
         if args.b.startswith('-'):
             usage(parser)
         blacklistFile = args.b
         if not os.path.isfile(blacklistFile):
             print('Blacklist file:', blacklistFile, 'does not exist!')
             usage(parser)
         # Now read the blacklist contents and assign it to blacklist.
-        execfile(blacklistFile, globals(), blacklistConfig)
-        blacklist = blacklistConfig.get('blacklist')
+        execfile(blacklistFile, globals(), configuration.blacklistConfig)
+        configuration.blacklist = configuration.blacklistConfig.get('blacklist')
 
     if args.c:
         if args.c.startswith('-'):
             usage(parser)
-        configFile = args.c
-        if not os.path.isfile(configFile):
-            print('Config file:', configFile, 'does not exist!')
+        configuration.configFile = args.c
+        if not os.path.isfile(configuration.configFile):
+            print('Config file:', configuration.configFile, 'does not exist!')
             usage(parser)
 
     if args.d:
@@ -608,17 +343,17 @@
 
     if args.e:
         if args.e.startswith('-'):
             usage(parser)
-        bmExecutable = args.e
-        if not is_exe(bmExecutable):
+        configuration.bmExecutable = args.e
+        if not is_exe(configuration.bmExecutable):
             usage(parser)
 
     if args.F:
-        failfast = True
+        configuration.failfast = True
 
     if args.f:
         if any([x.startswith('-') for x in args.f]):
             usage(parser)
-        filters.extend(args.f)
+        configuration.filters.extend(args.f)
         # Shut off multiprocessing mode when additional filters are specified.
-        # The rational is that the user is probably going after a very specific
+        # The rationale is that the user is probably going after a very specific
         # test and doesn't need a bunch of parallel test runners all looking for
@@ -629,22 +364,22 @@
        # manner. If we really need filtered parallel runs in the future, consider
         # adding a --no-output-on-success that prevents -v from setting
         # output-on-success.
-        no_multiprocess_test_runner = True
+        configuration.no_multiprocess_test_runner = True
 
     if args.g:
-        fs4all = False
+        configuration.fs4all = False
 
     if args.i:
-        ignore = True
+        configuration.ignore = True
 
     if args.k:
-        runHooks.extend(args.k)
+        configuration.runHooks.extend(args.k)
 
     if args.l:
-        skip_long_running_test = False
+        configuration.skip_long_running_test = False
 
     if args.framework:
-        lldbFrameworkPath = args.framework
+        configuration.lldbFrameworkPath = args.framework
 
     if args.executable:
         lldbtest_config.lldbExec = args.executable
@@ -653,54 +388,54 @@
         os.environ["LIBCXX_PATH"] = args.libcxx
 
     if args.n:
-        noHeaders = True
+        configuration.noHeaders = True
 
     if args.p:
         if args.p.startswith('-'):
             usage(parser)
-        regexp = args.p
+        configuration.regexp = args.p
 
     if args.q:
-        noHeaders = True
-        parsable = True
+        configuration.noHeaders = True
+        configuration.parsable = True
 
     if args.P and not args.v:
-        progress_bar = True
-        verbose = 0
+        configuration.progress_bar = True
+        configuration.verbose = 0
 
     if args.R:
         if args.R.startswith('-'):
             usage(parser)
-        rdir = os.path.abspath(args.R)
-        if os.path.exists(rdir):
+        configuration.rdir = os.path.abspath(args.R)
+        if os.path.exists(configuration.rdir):
             import shutil
-            print('Removing tree:', rdir)
-            shutil.rmtree(rdir)
+            print('Removing tree:', configuration.rdir)
+            shutil.rmtree(configuration.rdir)
 
     if args.r:
         if args.r.startswith('-'):
             usage(parser)
-        rdir = os.path.abspath(args.r)
-        if os.path.exists(rdir):
-            print('Relocated directory:', rdir, 'must not exist!')
+        configuration.rdir = os.path.abspath(args.r)
+        if os.path.exists(configuration.rdir):
+            print('Relocated directory:', configuration.rdir, 'must not exist!')
             usage(parser)
 
     if args.S:
-        skip_build_and_cleanup = True
+        configuration.skip_build_and_cleanup = True
 
     if args.s:
         if args.s.startswith('-'):
             usage(parser)
-        sdir_name = args.s
+        configuration.sdir_name = args.s
 
     if args.t:
         os.environ['LLDB_COMMAND_TRACE'] = 'YES'
 
     if args.T:
-        svn_silent = False
+        configuration.svn_silent = False
 
     if args.v:
-        verbose = 2
+        configuration.verbose = 2
 
     if args.w:
         os.environ['LLDB_WAIT_BETWEEN_TEST_CASES'] = 'YES'
@@ -708,20 +443,20 @@
 
     if args.X:
         if args.X.startswith('-'):
             usage(parser)
-        excluded.add(args.X)
+        configuration.excluded.add(args.X)
 
     if args.x:
         if args.x.startswith('-'):
             usage(parser)
-        bmBreakpointSpec = args.x
+        configuration.bmBreakpointSpec = args.x
 
     # argparse makes sure we have a number
     if args.y:
-        bmIterationCount = args.y
+        configuration.bmIterationCount = args.y
 
     # argparse makes sure we have a number
     if args.sharp:
-        count = args.sharp
+        configuration.count = args.sharp
 
     if sys.platform.startswith('win32'):
         os.environ['LLDB_DISABLE_CRASH_DIALOG'] = str(args.disable_crash_dialog)
@@ -731,23 +466,23 @@
         usage(parser)
 
     if args.no_multiprocess:
-        no_multiprocess_test_runner = True
+        configuration.no_multiprocess_test_runner = True
 
     if args.inferior:
-        is_inferior_test_runner = True
+        configuration.is_inferior_test_runner = True
 
-    # Turn on output_on_sucess if either explicitly added or -v specified.
+    # Turn on output_on_success if either explicitly added or -v specified.
     if args.output_on_success or args.v:
-        output_on_success = True
+        configuration.output_on_success = True
 
     if args.num_threads:
-        num_threads = args.num_threads
+        configuration.num_threads = args.num_threads
 
     if args.test_subdir:
-        multiprocess_test_subdir = args.test_subdir
+        configuration.multiprocess_test_subdir = args.test_subdir
 
     if args.test_runner_name:
-        test_runner_name = args.test_runner_name
+        configuration.test_runner_name = args.test_runner_name
 
     # Capture test results-related args.
     if args.curses and not args.inferior:
@@ -756,10 +491,10 @@
         args.results_file = "stdout"
 
     if args.results_file:
-        results_filename = args.results_file
+        configuration.results_filename = args.results_file
 
     if args.results_port:
-        results_port = args.results_port
+        configuration.results_port = args.results_port
 
     if args.results_file and args.results_port:
         sys.stderr.write(
@@ -768,16 +503,16 @@
         usage(args)
 
     if args.results_formatter:
-        results_formatter_name = args.results_formatter
+        configuration.results_formatter_name = args.results_formatter
     if args.results_formatter_options:
-        results_formatter_options = args.results_formatter_options
+        configuration.results_formatter_options = args.results_formatter_options
 
     if args.lldb_platform_name:
-        lldb_platform_name = args.lldb_platform_name
+        configuration.lldb_platform_name = args.lldb_platform_name
     if args.lldb_platform_url:
-        lldb_platform_url = args.lldb_platform_url
+        configuration.lldb_platform_url = args.lldb_platform_url
     if args.lldb_platform_working_dir:
-        lldb_platform_working_dir = args.lldb_platform_working_dir
+        configuration.lldb_platform_working_dir = args.lldb_platform_working_dir
 
     if args.event_add_entries and len(args.event_add_entries) > 0:
         entries = {}
@@ -799,18 +534,18 @@
 
     # Gather all the dirs passed on the command line.
     if len(args.args) > 0:
-        testdirs = list(map(os.path.abspath, args.args))
+        configuration.testdirs = list(map(os.path.abspath, args.args))
         # Shut off multiprocessing mode when test directories are specified.
-        no_multiprocess_test_runner = True
+        configuration.no_multiprocess_test_runner = True
 
     # If '-r dir' is specified, the tests should be run under the relocated
     # directory. Let's copy the testdirs over.
-    if rdir:
+    if configuration.rdir:
         from shutil import copytree, ignore_patterns
 
         tmpdirs = []
-        orig_testdirs = testdirs[:]
-        for srcdir in testdirs:
+        orig_testdirs = configuration.testdirs[:]
+        for srcdir in configuration.testdirs:
             # For example, /Volumes/data/lldb/svn/ToT/test/functionalities/watchpoint/hello_watchpoint
             # shall be split into ['/Volumes/data/lldb/svn/ToT/', 'functionalities/watchpoint/hello_watchpoint'].
             # Utilize the relative path to the 'test' directory to make our destination dir path.
@@ -818,14 +553,14 @@
            if ("test" + os.sep) in srcdir:
                 to_split_on = "test" + os.sep
             else:
                 to_split_on = "test"
-            dstdir = os.path.join(rdir, srcdir.split(to_split_on)[1])
+            dstdir = os.path.join(configuration.rdir, srcdir.split(to_split_on)[1])
             dstdir = dstdir.rstrip(os.sep)
             # Don't copy the *.pyc and .svn stuffs.
             copytree(srcdir, dstdir, ignore=ignore_patterns('*.pyc', '.svn'))
             tmpdirs.append(dstdir)
 
         # This will be our modified testdirs.
-        testdirs = tmpdirs
+        configuration.testdirs = tmpdirs
 
         # With '-r dir' specified, there's no cleanup of intermediate test files.
         os.environ["LLDB_DO_CLEANUP"] = 'NO'
@@ -838,7 +573,7 @@
     #     array_types conditional_break make
     #
     # where the make directory contains the Makefile.rules file.
-    if len(testdirs) != 1 or os.path.basename(orig_testdirs[0]) != 'test':
+    if len(configuration.testdirs) != 1 or os.path.basename(orig_testdirs[0]) != 'test':
         scriptdir = os.path.dirname(__file__)
         # Don't copy the .svn stuffs.
-        copytree(os.path.join(scriptdir, 'make'), os.path.join(rdir, 'make'),
+        copytree(os.path.join(scriptdir, 'make'), os.path.join(configuration.rdir, 'make'),
@@ -857,25 +592,24 @@
     # respectively.
     #
     # See also lldb-trunk/examples/test/usage-config.
-    global config, pre_flight, post_flight, lldbtest_remote_sandbox, lldbtest_remote_shell_template
-    if configFile:
+    if configuration.configFile:
         # Pass config (a dictionary) as the locals namespace for side-effect.
-        execfile(configFile, globals(), config)
+        execfile(configuration.configFile, globals(), configuration.config)
         #print("config:", config)
-        if "pre_flight" in config:
-            pre_flight = config["pre_flight"]
-            if not six.callable(pre_flight):
+        if "pre_flight" in configuration.config:
+            configuration.pre_flight = configuration.config["pre_flight"]
+            if not six.callable(configuration.pre_flight):
                 print("fatal error: pre_flight is not callable, exiting.")
                 sys.exit(1)
-        if "post_flight" in config:
-            post_flight = config["post_flight"]
-            if not six.callable(post_flight):
+        if "post_flight" in configuration.config:
+            configuration.post_flight = configuration.config["post_flight"]
+            if not six.callable(configuration.post_flight):
                 print("fatal error: post_flight is not callable, exiting.")
                 sys.exit(1)
-        if "lldbtest_remote_sandbox" in config:
-            lldbtest_remote_sandbox = config["lldbtest_remote_sandbox"]
-        if "lldbtest_remote_shell_template" in config:
-            lldbtest_remote_shell_template = config["lldbtest_remote_shell_template"]
+        if "lldbtest_remote_sandbox" in configuration.config:
+            configuration.lldbtest_remote_sandbox = configuration.config["lldbtest_remote_sandbox"]
+        if "lldbtest_remote_shell_template" in configuration.config:
+            configuration.lldbtest_remote_shell_template = configuration.config["lldbtest_remote_shell_template"]
 
     #print("sys.stderr:", sys.stderr)
     #print("sys.stdout:", sys.stdout)
@@ -909,47 +643,40 @@
 def setupTestResults():
     """Sets up test results-related objects based on arg settings."""
-    global results_filename
-    global results_file_object
-    global results_formatter_name
-    global results_formatter_object
-    global results_formatter_options
-    global results_port
-
     default_formatter_name = None
     cleanup_func = None
 
-    if results_filename:
+    if configuration.results_filename:
         # Open the results file for writing.
-        if results_filename == 'stdout':
-            results_file_object = sys.stdout
+        if configuration.results_filename == 'stdout':
+            configuration.results_file_object = sys.stdout
             cleanup_func = None
-        elif results_filename == 'stderr':
-            results_file_object = sys.stderr
+        elif configuration.results_filename == 'stderr':
+            configuration.results_file_object = sys.stderr
             cleanup_func = None
         else:
-            results_file_object = open(results_filename, "w")
-            cleanup_func = results_file_object.close
+            configuration.results_file_object = open(configuration.results_filename, "w")
+            cleanup_func = configuration.results_file_object.close
         default_formatter_name = "lldbsuite.test.result_formatter.XunitFormatter"
-    elif results_port:
+    elif configuration.results_port:
         # Connect to the specified localhost port.
-        results_file_object, cleanup_func = createSocketToLocalPort(
-            results_port)
+        configuration.results_file_object, cleanup_func = createSocketToLocalPort(
+            configuration.results_port)
         default_formatter_name = (
             "lldbsuite.test.result_formatter.RawPickledFormatter")
 
     # If we have a results formatter name specified and we didn't specify
     # a results file, we should use stdout.
-    if results_formatter_name is not None and results_file_object is None:
+    if configuration.results_formatter_name is not None and configuration.results_file_object is None:
         # Use stdout.
-        results_file_object = sys.stdout
+        configuration.results_file_object = sys.stdout
         cleanup_func = None
 
-    if results_file_object:
+    if configuration.results_file_object:
         # We care about the formatter. Choose user-specified or, if
         # none specified, use the default for the output type.
-        if results_formatter_name:
-            formatter_name = results_formatter_name
+        if configuration.results_formatter_name:
+            formatter_name = configuration.results_formatter_name
         else:
             formatter_name = default_formatter_name
@@ -963,8 +690,8 @@
 
         # Handle formatter options for the results formatter class.
         formatter_arg_parser = clazz.arg_parser()
-        if results_formatter_options and len(results_formatter_options) > 0:
-            command_line_options = results_formatter_options
+        if configuration.results_formatter_options and len(configuration.results_formatter_options) > 0:
+            command_line_options = configuration.results_formatter_options
         else:
             command_line_options = []
@@ -972,30 +699,30 @@
             command_line_options)
 
         # Create the TestResultsFormatter given the processed options.
-        results_formatter_object = clazz(
-            results_file_object, formatter_options)
+        configuration.results_formatter_object = clazz(
+            configuration.results_file_object, formatter_options)
 
         # Start the results formatter session - we'll only have one
         # during a given dotest process invocation.
         initialize_event = EventBuilder.bare_event("initialize")
         if isMultiprocessTestRunner():
-            if test_runner_name is not None and test_runner_name == "serial":
+            if configuration.test_runner_name is not None and configuration.test_runner_name == "serial":
                 # Only one worker queue here.
                 worker_count = 1
             else:
                 # Workers will be the number of threads specified.
-                worker_count = num_threads
+                worker_count = configuration.num_threads
         else:
             worker_count = 1
         initialize_event["worker_count"] = worker_count
 
-        results_formatter_object.handle_event(initialize_event)
+        configuration.results_formatter_object.handle_event(initialize_event)
 
         def shutdown_formatter():
             # Tell the formatter to write out anything it may have
             # been saving until the very end (e.g. xUnit results
             # can't complete its output until this point).
-            results_formatter_object.send_terminate_as_needed()
+            configuration.results_formatter_object.send_terminate_as_needed()
 
             # And now close out the output file-like object.
             if cleanup_func is not None:
@@ -1034,14 +761,6 @@
     As a side effect, we also discover the 'lldb' executable and export it here.
     """
 
-    global rdir
-    global testdirs
-    global dumpSysPath
-    global noHeaders
-    global svn_info
-    global svn_silent
-    global lldbFrameworkPath
-
     # Get the directory containing the current script.
     if "DOTEST_PROFILE" in os.environ and "DOTEST_SCRIPT_DIR" in os.environ:
         scriptPath = os.environ["DOTEST_SCRIPT_DIR"]
@@ -1051,15 +770,15 @@
         print("This script expects to reside in lldb's test directory.")
         sys.exit(-1)
 
-    if rdir:
+    if configuration.rdir:
         # Set up the LLDB_TEST environment variable appropriately, so that the
         # individual tests can be located relatively.
         #
         # See also lldbtest.TestBase.setUpClass(cls).
-        if len(testdirs) == 1 and os.path.basename(testdirs[0]) == 'test':
-            os.environ["LLDB_TEST"] = os.path.join(rdir, 'test')
+        if len(configuration.testdirs) == 1 and os.path.basename(configuration.testdirs[0]) == 'test':
+            os.environ["LLDB_TEST"] = os.path.join(configuration.rdir, 'test')
         else:
-            os.environ["LLDB_TEST"] = rdir
+            os.environ["LLDB_TEST"] = configuration.rdir
     else:
         os.environ["LLDB_TEST"] = scriptPath
@@ -1119,7 +838,7 @@
     os.environ["LLDB_LIB_DIR"] = lldbLibDir
     lldbImpLibDir = os.path.join(lldbLibDir, '..', 'lib') if sys.platform.startswith('win32') else lldbLibDir
     os.environ["LLDB_IMPLIB_DIR"] = lldbImpLibDir
-    if not noHeaders:
+    if not configuration.noHeaders:
         print("LLDB library dir:", os.environ["LLDB_LIB_DIR"])
         print("LLDB import library dir:", os.environ["LLDB_IMPLIB_DIR"])
         os.system('%s -v' % lldbtest_config.lldbExec)
@@ -1132,34 +851,32 @@
     if not lldbMiExec:
         if not shouldSkipBecauseOfCategories(["lldb-mi"]):
             print("The 'lldb-mi' executable cannot be located. The lldb-mi tests can not be run as a result.")
-            skipCategories.append("lldb-mi")
+            configuration.skipCategories.append("lldb-mi")
     else:
         os.environ["LLDBMI_EXEC"] = lldbMiExec
 
     # Skip printing svn/git information when running in parsable (lit-test compatibility) mode
-    if not svn_silent and not parsable:
+    if not configuration.svn_silent and not configuration.parsable:
         if os.path.isdir(os.path.join(lldbRootDirectory, '.svn')) and which("svn") is not None:
             pipe = subprocess.Popen([which("svn"), "info", lldbRootDirectory], stdout = subprocess.PIPE)
-            svn_info = pipe.stdout.read()
+            configuration.svn_info = pipe.stdout.read()
         elif os.path.isdir(os.path.join(lldbRootDirectory, '.git')) and which("git") is not None:
             pipe = subprocess.Popen([which("git"), "svn", "info", lldbRootDirectory], stdout = subprocess.PIPE)
-            svn_info = pipe.stdout.read()
-        if not noHeaders:
-            print(svn_info)
-
-    global ignore
+            configuration.svn_info = pipe.stdout.read()
+        if not configuration.noHeaders:
+            print(configuration.svn_info)
 
     lldbPythonDir = None # The directory that contains 'lldb/__init__.py'
-    if lldbFrameworkPath:
-        candidatePath = os.path.join(lldbFrameworkPath, 'Resources', 'Python')
+    if configuration.lldbFrameworkPath:
+        candidatePath = os.path.join(configuration.lldbFrameworkPath, 'Resources', 'Python')
         if os.path.isfile(os.path.join(candidatePath, 'lldb/__init__.py')):
             lldbPythonDir = candidatePath
         if not lldbPythonDir:
-            print('Resources/Python/lldb/__init__.py was not found in ' + lldbFrameworkPath)
+            print('Resources/Python/lldb/__init__.py was not found in ' + configuration.lldbFrameworkPath)
             sys.exit(-1)
     else:
         # The '-i' option is used to skip looking for lldb.py in the build tree.
-        if ignore:
+        if configuration.ignore:
             return
 
         # If our lldb supports the -P option, use it to find the python path:
@@ -1230,34 +947,27 @@
     # This is to locate the lldb.py module. Insert it right after sys.path[0].
     sys.path[1:1] = [lldbPythonDir]
-    if dumpSysPath:
+    if configuration.dumpSysPath:
         print("sys.path:", sys.path)
 
 def visit(prefix, dir, names):
     """Visitor function for os.path.walk(path, visit, arg)."""
-
-    global suite
-    global regexp
-    global filters
-    global fs4all
-    global excluded
-    global all_tests
-
-    if set(dir.split(os.sep)).intersection(excluded):
+    if set(dir.split(os.sep)).intersection(configuration.excluded):
         #print("Detected an excluded dir component: %s" % dir)
         return
 
     for name in names:
         if '.py' == os.path.splitext(name)[1] and name.startswith(prefix):
 
-            if name in all_tests:
+            if name in configuration.all_tests:
                 raise Exception("Found multiple tests with the name %s" % name)
-            all_tests.add(name)
+            configuration.all_tests.add(name)
 
             # Try to match the regexp pattern, if specified.
-            if regexp:
+            if configuration.regexp:
                 import re
-                if re.search(regexp, name):
+                if re.search(configuration.regexp, name):
                     #print("Filename: '%s' matches pattern: '%s'" % (name, regexp))
                     pass
                 else:
@@ -1274,7 +984,7 @@
             # Thoroughly check the filterspec against the base module and admit
             # the (base, filterspec) combination only when it makes sense.
             filterspec = None
-            for filterspec in filters:
+            for filterspec in configuration.filters:
                 # Optimistically set the flag to True.
                 filtered = True
                 module = __import__(base)
@@ -1291,13 +1001,13 @@
             # If filtered, we have a good filterspec. Add it.
             if filtered:
                 #print("adding filter spec %s to module %s" % (filterspec, module))
-                suite.addTests(
+                configuration.suite.addTests(
                     unittest2.defaultTestLoader.loadTestsFromName(filterspec, module))
                 continue
 
             # Forgo this module if the (base, filterspec) combo is invalid
             # and no '-g' option is specified
-            if filters and fs4all and not filtered:
+            if configuration.filters and configuration.fs4all and not filtered:
                 continue
 
             # Add either the filtered test case(s) (which is done before) or the entire test class.
@@ -1305,7 +1015,7 @@
             if not filterspec or not filtered:
                 # A simple case of just the module name. Also the failover case
                 # from the filterspec branch when the (base, filterspec) combo
                 # doesn't make sense.
- suite.addTests(unittest2.defaultTestLoader.loadTestsFromName(base)) + configuration.suite.addTests(unittest2.defaultTestLoader.loadTestsFromName(base)) def disabledynamics(): @@ -1394,7 +1104,7 @@ # the inferior (as specified by the multiprocess test # runner) OR we've been told to skip using the multiprocess # test runner - return not (is_inferior_test_runner or no_multiprocess_test_runner) + return not (configuration.is_inferior_test_runner or configuration.no_multiprocess_test_runner) def getVersionForSDK(sdk): sdk = str.lower(sdk) @@ -1412,64 +1122,13 @@ return None def setDefaultTripleForPlatform(): - if lldb_platform_name == 'ios-simulator': + if configuration.lldb_platform_name == 'ios-simulator': triple_str = 'x86_64-apple-ios%s' % (getVersionForSDK('iphonesimulator')) os.environ['TRIPLE'] = triple_str return {'TRIPLE':triple_str} return {} def run_suite(): - global just_do_benchmarks_test - global dont_do_dsym_test - global dont_do_dwarf_test - global dont_do_dwo_test - global blacklist - global blacklistConfig - global categoriesList - global validCategories - global useCategories - global skipCategories - global lldbFrameworkPath - global configFile - global archs - global compilers - global count - global dumpSysPath - global bmExecutable - global bmBreakpointSpec - global bmIterationCount - global failed - global failfast - global filters - global fs4all - global ignore - global progress_bar - global runHooks - global skip_build_and_cleanup - global skip_long_running_test - global noHeaders - global parsable - global regexp - global rdir - global sdir_name - global svn_silent - global verbose - global testdirs - global lldb_platform_name - global lldb_platform_url - global lldb_platform_working_dir - global setCrashInfoHook - global is_inferior_test_runner - global multiprocess_test_subdir - global num_threads - global output_on_success - global no_multiprocess_test_runner - global test_runner_name - global results_filename - global results_formatter_name - global results_formatter_options - global results_port - # On MacOS X, check to make sure that domain for com.apple.DebugSymbols defaults # does not exist before proceeding to running the test suite. if sys.platform.startswith("darwin"): @@ -1489,20 +1148,20 @@ # multiprocess test runner here. if isMultiprocessTestRunner(): from . import dosep - dosep.main(output_on_success, num_threads, multiprocess_test_subdir, - test_runner_name, results_formatter_object) + dosep.main(configuration.output_on_success, configuration.num_threads, + configuration.multiprocess_test_subdir, + configuration.test_runner_name, configuration.results_formatter_object) raise Exception("should never get here") - elif is_inferior_test_runner: + elif configuration.is_inferior_test_runner: # Shut off Ctrl-C processing in inferiors. The parallel # test runner handles this more holistically. signal.signal(signal.SIGINT, signal.SIG_IGN) setupSysPath() - setupCrashInfoHook() # # If '-l' is specified, do not skip the long running tests. - if not skip_long_running_test: + if not configuration.skip_long_running_test: os.environ["LLDB_SKIP_LONG_RUNNING_TEST"] = "NO" # For the time being, let's bracket the test runner within the @@ -1512,26 +1171,24 @@ # Create a singleton SBDebugger in the lldb namespace. 
lldb.DBG = lldb.SBDebugger.Create() - if lldb_platform_name: - print("Setting up remote platform '%s'" % (lldb_platform_name)) - lldb.remote_platform = lldb.SBPlatform(lldb_platform_name) - lldb.remote_platform_name = lldb_platform_name + if configuration.lldb_platform_name: + print("Setting up remote platform '%s'" % (configuration.lldb_platform_name)) + lldb.remote_platform = lldb.SBPlatform(configuration.lldb_platform_name) if not lldb.remote_platform.IsValid(): - print("error: unable to create the LLDB platform named '%s'." % (lldb_platform_name)) + print("error: unable to create the LLDB platform named '%s'." % (configuration.lldb_platform_name)) exitTestSuite(1) - if lldb_platform_url: + if configuration.lldb_platform_url: # We must connect to a remote platform if a LLDB platform URL was specified - print("Connecting to remote platform '%s' at '%s'..." % (lldb_platform_name, lldb_platform_url)) - lldb.platform_url = lldb_platform_url - platform_connect_options = lldb.SBPlatformConnectOptions(lldb_platform_url) + print("Connecting to remote platform '%s' at '%s'..." % (configuration.lldb_platform_name, configuration.lldb_platform_url)) + platform_connect_options = lldb.SBPlatformConnectOptions(configuration.lldb_platform_url) err = lldb.remote_platform.ConnectRemote(platform_connect_options) if err.Success(): print("Connected.") else: - print("error: failed to connect to remote platform using URL '%s': %s" % (lldb_platform_url, err)) + print("error: failed to connect to remote platform using URL '%s': %s" % (configuration.lldb_platform_url, err)) exitTestSuite(1) else: - lldb.platform_url = None + configuration.lldb_platform_url = None platform_changes = setDefaultTripleForPlatform() first = True @@ -1541,15 +1198,14 @@ first = False print("%s = %s" % (key,platform_changes[key])) - if lldb_platform_working_dir: - print("Setting remote platform working directory to '%s'..." % (lldb_platform_working_dir)) - lldb.remote_platform.SetWorkingDirectory(lldb_platform_working_dir) - lldb.remote_platform_working_dir = lldb_platform_working_dir + if configuration.lldb_platform_working_dir: + print("Setting remote platform working directory to '%s'..." % (configuration.lldb_platform_working_dir)) + lldb.remote_platform.SetWorkingDirectory(configuration.lldb_platform_working_dir) lldb.DBG.SetSelectedPlatform(lldb.remote_platform) else: lldb.remote_platform = None - lldb.remote_platform_working_dir = None - lldb.platform_url = None + configuration.lldb_platform_working_dir = None + configuration.lldb_platform_url = None target_platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2] @@ -1557,19 +1213,21 @@ # Use @dsym_test, @dwarf_test or @dwo_test decorators, defined in lldbtest.py, to mark a test as # a dsym, dwarf or dwo test. Use '-N dsym', '-N dwarf' or '-N dwo' to exclude dsym, dwarf or # dwo tests from running. - dont_do_dsym_test = dont_do_dsym_test or any(platform in target_platform for platform in ["linux", "freebsd", "windows"]) - dont_do_dwo_test = dont_do_dwo_test or any(platform in target_platform for platform in ["darwin", "macosx", "ios"]) + configuration.dont_do_dsym_test = configuration.dont_do_dsym_test \ + or any(platform in target_platform for platform in ["linux", "freebsd", "windows"]) + configuration.dont_do_dwo_test = configuration.dont_do_dwo_test \ + or any(platform in target_platform for platform in ["darwin", "macosx", "ios"]) # Don't do debugserver tests on everything except OS X. 
- dont_do_debugserver_test = "linux" in target_platform or "freebsd" in target_platform or "windows" in target_platform + configuration.dont_do_debugserver_test = "linux" in target_platform or "freebsd" in target_platform or "windows" in target_platform # Don't do lldb-server (llgs) tests on anything except Linux. - dont_do_llgs_test = not ("linux" in target_platform) + configuration.dont_do_llgs_test = not ("linux" in target_platform) # # Walk through the testdirs while collecting tests. # - for testdir in testdirs: + for testdir in configuration.testdirs: for (dirpath, dirnames, filenames) in os.walk(testdir): visit('Test', dirpath, filenames) @@ -1577,12 +1235,7 @@ # Now that we have loaded all the test cases, run the whole test suite. # - # Put the blacklist in the lldb namespace, to be used by lldb.TestBase. - lldb.blacklist = blacklist - # The pre_flight and post_flight come from reading a config file. - lldb.pre_flight = pre_flight - lldb.post_flight = post_flight def getsource_if_available(obj): """ Return the text of the source code for an object if available. Otherwise, @@ -1594,39 +1247,12 @@ except: return repr(obj) - if not noHeaders: - print("lldb.pre_flight:", getsource_if_available(lldb.pre_flight)) - print("lldb.post_flight:", getsource_if_available(lldb.post_flight)) - - # If either pre_flight or post_flight is defined, set lldb.test_remote to True. - if lldb.pre_flight or lldb.post_flight: - lldb.test_remote = True - else: - lldb.test_remote = False - - # So do the lldbtest_remote_sandbox and lldbtest_remote_shell_template variables. - lldb.lldbtest_remote_sandbox = lldbtest_remote_sandbox - lldb.lldbtest_remote_sandboxed_executable = None - lldb.lldbtest_remote_shell_template = lldbtest_remote_shell_template - - # Put all these test decorators in the lldb namespace. - lldb.just_do_benchmarks_test = just_do_benchmarks_test - lldb.dont_do_dsym_test = dont_do_dsym_test - lldb.dont_do_dwarf_test = dont_do_dwarf_test - lldb.dont_do_dwo_test = dont_do_dwo_test - lldb.dont_do_debugserver_test = dont_do_debugserver_test - lldb.dont_do_llgs_test = dont_do_llgs_test - - # Do we need to skip build and cleanup? - lldb.skip_build_and_cleanup = skip_build_and_cleanup - - # Put bmExecutable, bmBreakpointSpec, and bmIterationCount into the lldb namespace, too. - lldb.bmExecutable = bmExecutable - lldb.bmBreakpointSpec = bmBreakpointSpec - lldb.bmIterationCount = bmIterationCount + if not configuration.noHeaders: + print("configuration.pre_flight:", getsource_if_available(configuration.pre_flight)) + print("configuration.post_flight:", getsource_if_available(configuration.post_flight)) - # And don't forget the runHooks! - lldb.runHooks = runHooks + # If either pre_flight or post_flight is defined, set configuration.test_remote to True. + configuration.test_remote = configuration.pre_flight or configuration.post_flight # Turn on lldb loggings if necessary. lldbLoggings() @@ -1646,26 +1272,26 @@ import datetime # The windows platforms don't like ':' in the pathname. 
timestamp_started = datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S") - if not sdir_name: - sdir_name = timestamp_started - os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(), sdir_name) + if not configuration.sdir_name: + configuration.sdir_name = timestamp_started + os.environ["LLDB_SESSION_DIRNAME"] = os.path.join(os.getcwd(), configuration.sdir_name) - if not noHeaders: + if not configuration.noHeaders: sys.stderr.write("\nSession logs for test failures/errors/unexpected successes" - " will go into directory '%s'\n" % sdir_name) + " will go into directory '%s'\n" % configuration.sdir_name) sys.stderr.write("Command invoked: %s\n" % getMyCommandLine()) - if not os.path.isdir(sdir_name): + if not os.path.isdir(configuration.sdir_name): try: - os.mkdir(sdir_name) + os.mkdir(configuration.sdir_name) except OSError as exception: if exception.errno != errno.EEXIST: raise where_to_save_session = os.getcwd() - fname = os.path.join(sdir_name, "TestStarted-%d" % os.getpid()) + fname = os.path.join(configuration.sdir_name, "TestStarted-%d" % os.getpid()) with open(fname, "w") as f: print("Test started at: %s\n" % timestamp_started, file=f) - print(svn_info, file=f) + print(configuration.svn_info, file=f) print("Command invoked: %s\n" % getMyCommandLine(), file=f) # @@ -1676,20 +1302,20 @@ iterArchs = False iterCompilers = False - if not archs and "archs" in config: - archs = config["archs"] + if not configuration.archs and "archs" in configuration.config: + configuration.archs = configuration.config["archs"] - if isinstance(archs, list) and len(archs) >= 1: + if isinstance(configuration.archs, list) and len(configuration.archs) >= 1: iterArchs = True - if not compilers and "compilers" in config: - compilers = config["compilers"] + if not configuration.compilers and "compilers" in configuration.config: + configuration.compilers = configuration.config["compilers"] # # Add some intervention here to sanity check that the compilers requested are sane. # If found not to be an executable program, the invalid one is dropped from the list. - for i in range(len(compilers)): - c = compilers[i] + for i in range(len(configuration.compilers)): + c = configuration.compilers[i] if which(c): continue else: @@ -1699,24 +1325,24 @@ if cmd_output: if "not found" in cmd_output: print("dropping %s from the compilers used" % c) - compilers.remove(i) + configuration.compilers.remove(c) else: - compilers[i] = cmd_output.split('\n')[0] - print("'xcrun -find %s' returning %s" % (c, compilers[i])) + configuration.compilers[i] = cmd_output.split('\n')[0] + print("'xcrun -find %s' returning %s" % (c, configuration.compilers[i])) - if not parsable: - print("compilers=%s" % str(compilers)) + if not configuration.parsable: + print("compilers=%s" % str(configuration.compilers)) - if not compilers or len(compilers) == 0: + if not configuration.compilers or len(configuration.compilers) == 0: print("No eligible compiler found, exiting.") exitTestSuite(1) - if isinstance(compilers, list) and len(compilers) >= 1: + if isinstance(configuration.compilers, list) and len(configuration.compilers) >= 1: iterCompilers = True # Make a shallow copy of sys.path, we need to manipulate the search paths later. # This is only necessary if we are relocated and with different configurations. - if rdir: + if configuration.rdir: old_sys_path = sys.path[:] # If we iterate on archs or compilers, there is a chance we want to split stderr/stdout.
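# The compiler sanity check above removes entries from
# configuration.compilers while indexing it by position; a standalone sketch
# of the same intent that sidesteps that hazard, using shutil.which in place
# of the suite's own which() helper (an assumption made for self-containment):
import shutil
import subprocess

def sanitize_compilers(compilers):
    usable = []
    for c in compilers:
        if shutil.which(c):
            usable.append(c)
            continue
        try:
            # Ask xcrun to resolve short names like "clang" on Darwin hosts;
            # a nonzero exit means the compiler cannot be found.
            found = subprocess.check_output(["xcrun", "-find", c],
                                            universal_newlines=True)
        except (OSError, subprocess.CalledProcessError):
            print("dropping %s from the compilers used" % c)
            continue
        usable.append(found.splitlines()[0])
    return usable

# e.g. sanitize_compilers(["clang", "no-such-cc"]) keeps only the resolvable
# entry on a typical host.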
if iterArchs or iterCompilers: @@ -1726,15 +1352,15 @@ new_stdout = None # Iterating over all possible architecture and compiler combinations. - for ia in range(len(archs) if iterArchs else 1): + for ia in range(len(configuration.archs) if iterArchs else 1): archConfig = "" if iterArchs: - os.environ["ARCH"] = archs[ia] - archConfig = "arch=%s" % archs[ia] - for ic in range(len(compilers) if iterCompilers else 1): + os.environ["ARCH"] = configuration.archs[ia] + archConfig = "arch=%s" % configuration.archs[ia] + for ic in range(len(configuration.compilers) if iterCompilers else 1): if iterCompilers: - os.environ["CC"] = compilers[ic] - configString = "%s compiler=%s" % (archConfig, compilers[ic]) + os.environ["CC"] = configuration.compilers[ic] + configString = "%s compiler=%s" % (archConfig, configuration.compilers[ic]) else: configString = archConfig @@ -1749,12 +1375,12 @@ # Check whether we need to split stderr/stdout into configuration # specific files. - if old_stderr.name != '' and config.get('split_stderr'): + if old_stderr.name != '' and configuration.config.get('split_stderr'): if new_stderr: new_stderr.close() new_stderr = open("%s.%s" % (old_stderr.name, configPostfix), "w") sys.stderr = new_stderr - if old_stdout.name != '' and config.get('split_stdout'): + if old_stdout.name != '' and configuration.config.get('split_stdout'): if new_stdout: new_stdout.close() new_stdout = open("%s.%s" % (old_stdout.name, configPostfix), "w") @@ -1765,7 +1391,7 @@ # directory and to update sys.path before invoking the test runner. # The purpose is to separate the configuration-specific directories # from each other. - if rdir: + if configuration.rdir: from shutil import copytree, rmtree, ignore_patterns newrdir = "%s.%s" % (rdir, configPostfix) @@ -1779,7 +1405,7 @@ # level test directory. # # See also lldbtest.TestBase.setUpClass(cls). - if len(testdirs) == 1 and os.path.basename(testdirs[0]) == 'test': + if len(configuration.testdirs) == 1 and os.path.basename(configuration.testdirs[0]) == 'test': os.environ["LLDB_TEST"] = os.path.join(newrdir, 'test') else: os.environ["LLDB_TEST"] = newrdir @@ -1788,18 +1414,18 @@ sys.path = [x.replace(rdir, newrdir, 1) for x in old_sys_path] # Output the configuration. - if not parsable: + if not configuration.parsable: sys.stderr.write("\nConfiguration: " + configString + "\n") #print("sys.stderr name is", sys.stderr.name) #print("sys.stdout name is", sys.stdout.name) # First, write out the number of collected test cases. - if not parsable: + if not configuration.parsable: sys.stderr.write(separator + "\n") sys.stderr.write("Collected %d test%s\n\n" - % (suite.countTestCases(), - suite.countTestCases() != 1 and "s" or "")) + % (configuration.suite.countTestCases(), + configuration.suite.countTestCases() != 1 and "s" or "")) class LLDBTestResult(unittest2.TextTestResult): """ @@ -1843,23 +1469,25 @@ super(LLDBTestResult, self).__init__(*args) LLDBTestResult.__singleton__ = self # Now put this singleton into the lldb module namespace. - lldb.test_result = self + configuration.test_result = self # Computes the format string for displaying the counter. - global suite - counterWidth = len(str(suite.countTestCases())) + counterWidth = len(str(configuration.suite.countTestCases())) self.fmt = "%" + str(counterWidth) + "d: " self.indentation = ' ' * (counterWidth + 2) # This counts from 1 .. suite.countTestCases(). 
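# The format string computed above right-aligns the running test counter to
# the width of the total test count, so counters line up in columns; a tiny
# standalone check of that construction:
def make_counter_fmt(total_tests):
    counter_width = len(str(total_tests))
    return "%" + str(counter_width) + "d: "

assert make_counter_fmt(1234) == "%4d: "
assert make_counter_fmt(1234) % 7 == "   7: "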
self.counter = 0 (width, height) = LLDBTestResult.getTerminalSize() self.progressbar = None - global progress_bar - if width > 10 and not parsable and progress_bar: + if width > 10 and not configuration.parsable and configuration.progress_bar: try: - self.progressbar = progress.ProgressWithEvents(stdout=self.stream,start=0,end=suite.countTestCases(),width=width-10) + self.progressbar = progress.ProgressWithEvents( + stdout=self.stream, + start=0, + end=configuration.suite.countTestCases(), + width=width-10) except: self.progressbar = None - self.results_formatter = results_formatter_object + self.results_formatter = configuration.results_formatter_object def _config_string(self, test): compiler = getattr(test, "getCompiler", None) @@ -1910,8 +1538,7 @@ def startTest(self, test): if shouldSkipBecauseOfCategories(self.getCategoriesForTest(test)): self.hardMarkAsSkipped(test) - global setCrashInfoHook - setCrashInfoHook("%s at %s" % (str(test),inspect.getfile(test.__class__))) + configuration.setCrashInfoHook("%s at %s" % (str(test),inspect.getfile(test.__class__))) self.counter += 1 #if self.counter == 4: # import crashinfo @@ -1925,37 +1552,32 @@ EventBuilder.event_for_start(test)) def addSuccess(self, test): - global parsable super(LLDBTestResult, self).addSuccess(test) - if parsable: + if configuration.parsable: self.stream.write("PASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))) if self.results_formatter: self.results_formatter.handle_event( EventBuilder.event_for_success(test)) def addError(self, test, err): - global sdir_has_content - global parsable - sdir_has_content = True + configuration.sdir_has_content = True super(LLDBTestResult, self).addError(test, err) method = getattr(test, "markError", None) if method: method() - if parsable: + if configuration.parsable: self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))) if self.results_formatter: self.results_formatter.handle_event( EventBuilder.event_for_error(test, err)) def addCleanupError(self, test, err): - global sdir_has_content - global parsable - sdir_has_content = True + configuration.sdir_has_content = True super(LLDBTestResult, self).addCleanupError(test, err) method = getattr(test, "markCleanupError", None) if method: method() - if parsable: + if configuration.parsable: self.stream.write("CLEANUP ERROR: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))) if self.results_formatter: self.results_formatter.handle_event( @@ -1963,37 +1585,32 @@ test, err)) def addFailure(self, test, err): - global sdir_has_content - global failuresPerCategory - global parsable - sdir_has_content = True + configuration.sdir_has_content = True super(LLDBTestResult, self).addFailure(test, err) method = getattr(test, "markFailure", None) if method: method() - if parsable: + if configuration.parsable: self.stream.write("FAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))) - if useCategories: + if configuration.useCategories: test_categories = self.getCategoriesForTest(test) for category in test_categories: - if category in failuresPerCategory: - failuresPerCategory[category] = failuresPerCategory[category] + 1 + if category in configuration.failuresPerCategory: + configuration.failuresPerCategory[category] = configuration.failuresPerCategory[category] + 1 else: - failuresPerCategory[category] = 1 + configuration.failuresPerCategory[category] = 1 if self.results_formatter: self.results_formatter.handle_event( EventBuilder.event_for_failure(test, err)) def addExpectedFailure(self, 
test, err, bugnumber): - global sdir_has_content - global parsable - sdir_has_content = True + configuration.sdir_has_content = True super(LLDBTestResult, self).addExpectedFailure(test, err, bugnumber) method = getattr(test, "markExpectedFailure", None) if method: method(err, bugnumber) - if parsable: + if configuration.parsable: self.stream.write("XFAIL: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))) if self.results_formatter: self.results_formatter.handle_event( @@ -2001,28 +1618,24 @@ test, err, bugnumber)) def addSkip(self, test, reason): - global sdir_has_content - global parsable - sdir_has_content = True + configuration.sdir_has_content = True super(LLDBTestResult, self).addSkip(test, reason) method = getattr(test, "markSkippedTest", None) if method: method() - if parsable: + if configuration.parsable: self.stream.write("UNSUPPORTED: LLDB (%s) :: %s (%s) \n" % (self._config_string(test), str(test), reason)) if self.results_formatter: self.results_formatter.handle_event( EventBuilder.event_for_skip(test, reason)) def addUnexpectedSuccess(self, test, bugnumber): - global sdir_has_content - global parsable - sdir_has_content = True + configuration.sdir_has_content = True super(LLDBTestResult, self).addUnexpectedSuccess(test, bugnumber) method = getattr(test, "markUnexpectedSuccess", None) if method: method(bugnumber) - if parsable: + if configuration.parsable: self.stream.write("XPASS: LLDB (%s) :: %s\n" % (self._config_string(test), str(test))) if self.results_formatter: self.results_formatter.handle_event( @@ -2030,19 +1643,19 @@ test, bugnumber)) - if parsable: + if configuration.parsable: v = 0 - elif progress_bar: + elif configuration.progress_bar: v = 1 else: - v = verbose + v = configuration.verbose # Invoke the test runner. - if count == 1: + if configuration.count == 1: result = unittest2.TextTestRunner(stream=sys.stderr, verbosity=v, - failfast=failfast, - resultclass=LLDBTestResult).run(suite) + failfast=configuration.failfast, + resultclass=LLDBTestResult).run(configuration.suite) else: # We are invoking the same test suite more than once. 
In this case, # mark __ignore_singleton__ flag as True so the singleton pattern is @@ -2052,22 +1665,22 @@ result = unittest2.TextTestRunner(stream=sys.stderr, verbosity=v, - failfast=failfast, - resultclass=LLDBTestResult).run(suite) + failfast=configuration.failfast, + resultclass=LLDBTestResult).run(configuration.suite) - failed = failed or not result.wasSuccessful() + configuration.failed = configuration.failed or not result.wasSuccessful() - if sdir_has_content and not parsable: + if configuration.sdir_has_content and not configuration.parsable: sys.stderr.write("Session logs for test failures/errors/unexpected successes" - " can be found in directory '%s'\n" % sdir_name) + " can be found in directory '%s'\n" % configuration.sdir_name) - if useCategories and len(failuresPerCategory) > 0: + if configuration.useCategories and len(configuration.failuresPerCategory) > 0: sys.stderr.write("Failures per category:\n") - for category in failuresPerCategory: - sys.stderr.write("%s - %d\n" % (category,failuresPerCategory[category])) + for category in configuration.failuresPerCategory: + sys.stderr.write("%s - %d\n" % (category, configuration.failuresPerCategory[category])) os.chdir(where_to_save_session) - fname = os.path.join(sdir_name, "TestFinished-%d" % os.getpid()) + fname = os.path.join(configuration.sdir_name, "TestFinished-%d" % os.getpid()) with open(fname, "w") as f: print("Test finished at: %s\n" % datetime.datetime.now().strftime("%Y-%m-%d-%H_%M_%S"), file=f) @@ -2078,7 +1691,7 @@ subprocess.Popen(["/bin/sh", "-c", "kill %s; exit 0" % (os.getpid())]) # Exiting. - exitTestSuite(failed) + exitTestSuite(configuration.failed) if __name__ == "__main__": print(__file__ + " is for use as a module only. It should not be run as a standalone script.") Index: packages/Python/lldbsuite/test/functionalities/inferior-changed/TestInferiorChanged.py =================================================================== --- packages/Python/lldbsuite/test/functionalities/inferior-changed/TestInferiorChanged.py +++ packages/Python/lldbsuite/test/functionalities/inferior-changed/TestInferiorChanged.py @@ -6,6 +6,7 @@ import os, time import lldb +from lldbsuite.test import configuration from lldbsuite.test.lldbtest import * import lldbsuite.test.lldbutil as lldbutil @@ -55,7 +56,7 @@ """Test lldb reloads the inferior after it was changed during the session.""" self.runCmd("process kill") # Prod the lldb-platform that we have a newly built inferior ready.
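# A standalone sketch of the per-category failure tally summarized above
# (addFailure feeds the dictionary, the loop prints it at the end of the run);
# the category names are invented for illustration:
failuresPerCategory = {}

def record_failure_categories(test_categories):
    for category in test_categories:
        if category in failuresPerCategory:
            failuresPerCategory[category] = failuresPerCategory[category] + 1
        else:
            failuresPerCategory[category] = 1

record_failure_categories(["dwarf", "pyapi"])
record_failure_categories(["dwarf"])
for category in failuresPerCategory:
    print("%s - %d" % (category, failuresPerCategory[category]))
# prints: "dwarf - 2" and "pyapi - 1"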
- if lldb.lldbtest_remote_sandbox: + if configuration.lldbtest_remote_sandbox: self.runCmd("file " + self.exe, CURRENT_EXECUTABLE_SET) self.runCmd("run", RUN_SUCCEEDED) self.runCmd("process status") Index: packages/Python/lldbsuite/test/functionalities/stop-hook/TestStopHookMechanism.py =================================================================== --- packages/Python/lldbsuite/test/functionalities/stop-hook/TestStopHookMechanism.py +++ packages/Python/lldbsuite/test/functionalities/stop-hook/TestStopHookMechanism.py @@ -8,6 +8,7 @@ import os import lldb +from lldbsuite.test import configuration from lldbsuite.test.lldbtest import * class StopHookMechanismTestCase(TestBase): @@ -47,9 +48,9 @@ child.expect_exact(prompt) child.sendline('platform select %s' % lldb.remote_platform.GetName()) child.expect_exact(prompt) - child.sendline('platform connect %s' % lldb.platform_url) + child.sendline('platform connect %s' % configuration.lldb_platform_url) child.expect_exact(prompt) - child.sendline('platform settings -w %s' % lldb.remote_platform_working_dir) + child.sendline('platform settings -w %s' % configuration.lldb_platform_working_dir) child.expect_exact(prompt) child.sendline('target create %s' % exe) Index: packages/Python/lldbsuite/test/functionalities/stop-hook/multiple_threads/TestStopHookMultipleThreads.py =================================================================== --- packages/Python/lldbsuite/test/functionalities/stop-hook/multiple_threads/TestStopHookMultipleThreads.py +++ packages/Python/lldbsuite/test/functionalities/stop-hook/multiple_threads/TestStopHookMultipleThreads.py @@ -8,6 +8,7 @@ import os, time import lldb +from lldbsuite.test import configuration from lldbsuite.test.lldbtest import * class StopHookForMultipleThreadsTestCase(TestBase): @@ -49,9 +50,9 @@ child.expect_exact(prompt) child.sendline('platform select %s' % lldb.remote_platform.GetName()) child.expect_exact(prompt) - child.sendline('platform connect %s' % lldb.platform_url) + child.sendline('platform connect %s' % configuration.lldb_platform_url) child.expect_exact(prompt) - child.sendline('platform settings -w %s' % lldb.remote_platform_working_dir) + child.sendline('platform settings -w %s' % configuration.lldb_platform_working_dir) child.expect_exact(prompt) child.sendline('target create %s' % exe) Index: packages/Python/lldbsuite/test/lldbtest.py =================================================================== --- packages/Python/lldbsuite/test/lldbtest.py +++ packages/Python/lldbsuite/test/lldbtest.py @@ -58,6 +58,7 @@ # LLDB modules import lldb +from . import configuration from . import lldbtest_config from . import lldbutil from . 
import test_categories @@ -460,9 +461,9 @@ def android_device_api(): if not hasattr(android_device_api, 'result'): - assert lldb.platform_url is not None + assert configuration.lldb_platform_url is not None device_id = None - parsed_url = urlparse.urlparse(lldb.platform_url) + parsed_url = urlparse.urlparse(configuration.lldb_platform_url) host_name = parsed_url.netloc.split(":")[0] if host_name != 'localhost': device_id = host_name @@ -520,7 +521,7 @@ raise Exception("@benchmarks_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if not lldb.just_do_benchmarks_test: + if not configuration.just_do_benchmarks_test: self.skipTest("benchmarks tests") return func(self, *args, **kwargs) @@ -547,7 +548,7 @@ raise Exception("@dsym_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if lldb.dont_do_dsym_test: + if configuration.dont_do_dsym_test: self.skipTest("dsym tests") return func(self, *args, **kwargs) @@ -561,7 +562,7 @@ raise Exception("@dwarf_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if lldb.dont_do_dwarf_test: + if configuration.dont_do_dwarf_test: self.skipTest("dwarf tests") return func(self, *args, **kwargs) @@ -575,7 +576,7 @@ raise Exception("@dwo_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if lldb.dont_do_dwo_test: + if configuration.dont_do_dwo_test: self.skipTest("dwo tests") return func(self, *args, **kwargs) @@ -589,7 +590,7 @@ raise Exception("@debugserver_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if lldb.dont_do_debugserver_test: + if configuration.dont_do_debugserver_test: self.skipTest("debugserver tests") return func(self, *args, **kwargs) @@ -603,7 +604,7 @@ raise Exception("@llgs_test can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if lldb.dont_do_llgs_test: + if configuration.dont_do_llgs_test: self.skipTest("llgs tests") return func(self, *args, **kwargs) @@ -617,7 +618,7 @@ raise Exception("@not_remote_testsuite_ready can only be used to decorate a test method") @wraps(func) def wrapper(self, *args, **kwargs): - if lldb.lldbtest_remote_sandbox or lldb.remote_platform: + if configuration.lldbtest_remote_sandbox or lldb.remote_platform: self.skipTest("not ready for remote testsuite") return func(self, *args, **kwargs) @@ -921,7 +922,7 @@ def skipIfiOSSimulator(func): """Decorate the item to skip tests that should be skipped on the iOS Simulator.""" - return unittest2.skipIf(hasattr(lldb, 'remote_platform_name') and lldb.remote_platform_name == 'ios-simulator', 'skip on the iOS Simulator')(func) + return unittest2.skipIf(configuration.lldb_platform_name == 'ios-simulator', 'skip on the iOS Simulator')(func) def skipIfFreeBSD(func): """Decorate the item to skip tests that should be skipped on FreeBSD.""" @@ -1323,7 +1324,7 @@ Do class-wide cleanup. """ - if doCleanup and not lldb.skip_build_and_cleanup: + if doCleanup and not configuration.skip_build_and_cleanup: # First, let's do the platform-specific cleanup. module = builder_module() module.cleanup() @@ -1444,7 +1445,7 @@ # which also sets the "__benchmarks_test__" attribute of the # function object to True. 
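# The skip decorators above all share one pattern: read the gate from the
# shared configuration module at call time, not at decoration time, so that
# command-line parsing (which runs after the module is imported) still takes
# effect. A self-contained sketch of that pattern, with SimpleNamespace
# standing in for lldbsuite.test.configuration:
import types
from functools import wraps

configuration = types.SimpleNamespace(dont_do_dsym_test=False)  # stand-in

def dsym_test(func):
    """Decorate the item as a dsym test, skipped when the gate is set."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if configuration.dont_do_dsym_test:
            self.skipTest("dsym tests")
        return func(self, *args, **kwargs)
    return wrapper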
try: - if lldb.just_do_benchmarks_test: + if configuration.just_do_benchmarks_test: testMethod = getattr(self, self._testMethodName) if getattr(testMethod, "__benchmarks_test__", False): pass @@ -1542,7 +1543,7 @@ See also dotest.py where lldb.runHooks are processed/populated. """ - if not lldb.runHooks: + if not configuration.runHooks: self.skipTest("No runhooks specified for lldb, skip the test") if use_cmd_api: for hook in lldb.runhooks: @@ -1839,16 +1840,16 @@ # output tracebacks into session pairs = [] if self.__errored__: - pairs = lldb.test_result.errors + pairs = configuration.test_result.errors prefix = 'Error' elif self.__cleanup_errored__: - pairs = lldb.test_result.cleanup_errors + pairs = configuration.test_result.cleanup_errors prefix = 'CleanupError' elif self.__failed__: - pairs = lldb.test_result.failures + pairs = configuration.test_result.failures prefix = 'Failure' elif self.__expected__: - pairs = lldb.test_result.expectedFailures + pairs = configuration.test_result.expectedFailures prefix = 'ExpectedFailure' elif self.__skipped__: prefix = 'SkippedTest' @@ -2144,7 +2145,7 @@ def buildDefault(self, architecture=None, compiler=None, dictionary=None, clean=True): """Platform specific way to build the default binaries.""" - if lldb.skip_build_and_cleanup: + if configuration.skip_build_and_cleanup: return module = builder_module() if target_is_android(): @@ -2154,7 +2155,7 @@ def buildDsym(self, architecture=None, compiler=None, dictionary=None, clean=True): """Platform specific way to build binaries with dsym info.""" - if lldb.skip_build_and_cleanup: + if configuration.skip_build_and_cleanup: return module = builder_module() if not module.buildDsym(self, architecture, compiler, dictionary, clean): @@ -2162,7 +2163,7 @@ def buildDwarf(self, architecture=None, compiler=None, dictionary=None, clean=True): """Platform specific way to build binaries with dwarf maps.""" - if lldb.skip_build_and_cleanup: + if configuration.skip_build_and_cleanup: return module = builder_module() if target_is_android(): @@ -2172,7 +2173,7 @@ def buildDwo(self, architecture=None, compiler=None, dictionary=None, clean=True): """Platform specific way to build binaries with dwarf maps.""" - if lldb.skip_build_and_cleanup: + if configuration.skip_build_and_cleanup: return module = builder_module() if target_is_android(): @@ -2247,7 +2248,7 @@ def cleanup(self, dictionary=None): """Platform specific way to do cleanup after build.""" - if lldb.skip_build_and_cleanup: + if configuration.skip_build_and_cleanup: return module = builder_module() if not module.cleanup(self, dictionary): @@ -2416,13 +2417,14 @@ Base.setUp(self) try: - if lldb.blacklist: + blacklist = configuration.blacklist + if blacklist: className = self.__class__.__name__ classAndMethodName = "%s.%s" % (className, self._testMethodName) - if className in lldb.blacklist: - self.skipTest(lldb.blacklist.get(className)) - elif classAndMethodName in lldb.blacklist: - self.skipTest(lldb.blacklist.get(classAndMethodName)) + if className in blacklist: + self.skipTest(blacklist.get(className)) + elif classAndMethodName in blacklist: + self.skipTest(blacklist.get(classAndMethodName)) except AttributeError: pass @@ -2442,7 +2444,7 @@ # command, instead. See also runCmd() where it decorates the "file filename" call # with additional functionality when running testsuite remotely. 
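# A standalone sketch of the blacklist lookup just above: an entry can name a
# whole test class or a single "Class.method", and its value is used as the
# skip reason. The sample entries are invented for illustration:
blacklist = {
    "FooTestCase": "fails against this branch",
    "BarTestCase.test_baz": "flaky on the remote bots",
}

def skip_reason(class_name, method_name):
    class_and_method = "%s.%s" % (class_name, method_name)
    if class_name in blacklist:
        return blacklist.get(class_name)
    if class_and_method in blacklist:
        return blacklist.get(class_and_method)
    return None

assert skip_reason("FooTestCase", "test_anything") == "fails against this branch"
assert skip_reason("BarTestCase", "test_baz") == "flaky on the remote bots"
assert skip_reason("BarTestCase", "test_ok") is None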
# - if lldb.lldbtest_remote_sandbox: + if configuration.lldbtest_remote_sandbox: def DecoratedCreateTarget(arg): self.runCmd("file %s" % arg) target = self.dbg.GetSelectedTarget() @@ -2472,12 +2474,12 @@ self.res = lldb.SBCommandReturnObject() # Run global pre-flight code, if defined via the config file. - if lldb.pre_flight: - lldb.pre_flight(self) + if configuration.pre_flight: + configuration.pre_flight(self) - if lldb.remote_platform and lldb.remote_platform_working_dir: + if lldb.remote_platform and configuration.lldb_platform_working_dir: remote_test_dir = lldbutil.join_remote_paths( - lldb.remote_platform_working_dir, + configuration.lldb_platform_working_dir, self.getArchitecture(), str(self.test_number), self.mydir) @@ -2601,8 +2603,8 @@ self.dbg.DeleteTarget(target) # Run global post-flight code, if defined via the config file. - if lldb.post_flight: - lldb.post_flight(self) + if configuration.post_flight: + configuration.post_flight(self) # Do this last, to make sure it's in reverse order from how we setup. Base.tearDown(self) @@ -2641,7 +2643,7 @@ # via the setting of lldb.lldbtest_remote_sandbox. if cmd.startswith("target create "): cmd = cmd.replace("target create ", "file ") - if cmd.startswith("file ") and lldb.lldbtest_remote_sandbox: + if cmd.startswith("file ") and configuration.lldbtest_remote_sandbox: with recording(self, trace) as sbuf: the_rest = cmd.split("file ")[1] # Split the rest of the command line. @@ -2656,7 +2658,7 @@ print("Found a file command, target (with absolute pathname)=%s" % abs_target, file=sbuf) fpath, fname = os.path.split(abs_target) parent_dir = os.path.split(fpath)[0] - platform_target_install_command = 'platform target-install %s %s' % (fpath, lldb.lldbtest_remote_sandbox) + platform_target_install_command = 'platform target-install %s %s' % (fpath, configuration.lldbtest_remote_sandbox) print("Insert this command to be run first: %s" % platform_target_install_command, file=sbuf) self.ci.HandleCommand(platform_target_install_command, self.res) # And this is the file command we want to execute, instead. @@ -2665,8 +2667,8 @@ # Populate the remote executable pathname into the lldb namespace, # so that test cases can grab this thing out of the namespace. # - lldb.lldbtest_remote_sandboxed_executable = abs_target.replace(parent_dir, lldb.lldbtest_remote_sandbox) - cmd = "file -P %s %s %s" % (lldb.lldbtest_remote_sandboxed_executable, the_rest.replace(target, ''), abs_target) + remote_sandboxed_executable = abs_target.replace(parent_dir, configuration.lldbtest_remote_sandbox) + cmd = "file -P %s %s %s" % (remote_sandboxed_executable, the_rest.replace(target, ''), abs_target) print("And this is the replaced file command: %s" % cmd, file=sbuf) running = (cmd.startswith("run") or cmd.startswith("process launch")) @@ -2849,7 +2851,7 @@ def build(self, architecture=None, compiler=None, dictionary=None, clean=True): """Platform specific way to build the default binaries.""" - if lldb.skip_build_and_cleanup: + if configuration.skip_build_and_cleanup: return module = builder_module() if target_is_android(): Index: packages/Python/lldbsuite/test/source-manager/TestSourceManager.py =================================================================== --- packages/Python/lldbsuite/test/source-manager/TestSourceManager.py +++ packages/Python/lldbsuite/test/source-manager/TestSourceManager.py @@ -26,7 +26,7 @@ TestBase.setUp(self) # Find the line number to break inside main().
self.line = line_number('main.c', '// Set break point at this line.') - lldb.skip_build_and_cleanup = False + configuration.skip_build_and_cleanup = False @add_test_categories(['pyapi']) def test_display_source_python(self): Index: packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py =================================================================== --- packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py +++ packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py @@ -19,6 +19,7 @@ import sys import tempfile import time +from lldbsuite.test import configuration from lldbsuite.test.lldbtest import * from lldbgdbserverutils import * import logging @@ -60,13 +61,13 @@ self.named_pipe = None self.named_pipe_fd = None self.stub_sends_two_stop_notifications_on_kill = False - if lldb.platform_url: - if lldb.platform_url.startswith('unix-'): + if configuration.lldb_platform_url: + if configuration.lldb_platform_url.startswith('unix-'): url_pattern = '(.+)://\[?(.+?)\]?/.*' else: url_pattern = '(.+)://(.+):\d+' - scheme, host = re.match(url_pattern, lldb.platform_url).groups() - if lldb.remote_platform_name == 'remote-android' and host != 'localhost': + scheme, host = re.match(url_pattern, configuration.lldb_platform_url).groups() + if configuration.lldb_platform_name == 'remote-android' and host != 'localhost': self.stub_device = host self.stub_hostname = 'localhost' else:
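# A standalone sketch of the platform-URL parsing in the last hunk above: pick
# a pattern by scheme, then split out the scheme and host components. The
# sample URLs are invented for illustration:
import re

def parse_platform_url(platform_url):
    if platform_url.startswith('unix-'):
        url_pattern = r'(.+)://\[?(.+?)\]?/.*'
    else:
        url_pattern = r'(.+)://(.+):\d+'
    return re.match(url_pattern, platform_url).groups()

assert parse_platform_url("connect://localhost:5432") == ("connect", "localhost")
assert parse_platform_url("unix-connect://[localhost]/tmp/platform.sock") == ("unix-connect", "localhost")

# With a remote-android platform, a non-localhost host component is treated as
# the device id and the stub hostname falls back to localhost, as the hunk
# above shows.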