Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/.clang-format =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/.clang-format +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/.clang-format @@ -0,0 +1 @@ +BasedOnStyle: LLVM Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/Makefile @@ -0,0 +1,8 @@ +LEVEL = ../../make + +override CFLAGS_EXTRAS += -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS +ENABLE_THREADS := YES +CXX_SOURCES := main.cpp +MAKE_DSYM :=NO + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestAppleSimulatorOSType.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestAppleSimulatorOSType.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestAppleSimulatorOSType.py @@ -0,0 +1,123 @@ +from __future__ import print_function + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + +import json + +class TestAppleSimulatorOSType(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def check_simulator_ostype(self, sdk, platform, arch='x86_64'): + sim_devices_str = subprocess.check_output(['xcrun', 'simctl', 'list', + '-j', 'devices']).decode("utf-8") + sim_devices = json.loads(sim_devices_str)['devices'] + # Find an available simulator for the requested platform + deviceUDID = None + for simulator in sim_devices: + if isinstance(simulator,dict): + runtime = simulator['name'] + devices = simulator['devices'] + else: 
+ runtime = simulator + devices = sim_devices[simulator] + if not platform in runtime.lower(): + continue + for device in devices: + if 'availability' in device and device['availability'] != '(available)': + continue + if 'isAvailable' in device and device['isAvailable'] != True: + continue + deviceUDID = device['udid'] + break + if deviceUDID != None: + break + + # Launch the process using simctl + self.assertIsNotNone(deviceUDID) + exe_name = 'test_simulator_platform_{}'.format(platform) + sdkroot = subprocess.check_output(['xcrun', '--show-sdk-path', '--sdk', + sdk]).decode("utf-8") + self.build(dictionary={ 'EXE': exe_name, 'SDKROOT': sdkroot.strip(), + 'ARCH': arch }) + exe_path = self.getBuildArtifact(exe_name) + sim_launcher = subprocess.Popen(['xcrun', 'simctl', 'spawn', + deviceUDID, exe_path, + 'print-pid', 'sleep:10'], + stderr=subprocess.PIPE) + # Get the PID from the process output + pid = None + while not pid: + stderr = sim_launcher.stderr.readline().decode("utf-8") + if stderr == '': + continue + m = re.match(r"PID: (.*)", stderr) + self.assertIsNotNone(m) + pid = int(m.group(1)) + + # Launch debug monitor attaching to the simulated process + self.init_debugserver_test() + server = self.connect_to_debug_monitor(attach_pid=pid) + + # Setup packet sequences + self.add_no_ack_remote_stream() + self.add_process_info_collection_packets() + self.test_sequence.add_log_lines( + ["read packet: " + + "$jGetLoadedDynamicLibrariesInfos:{\"fetch_all_solibs\" : true}]#ce", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "dylib_info_raw"}}], + True) + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Check that ostype is correct + self.assertEquals(process_info['ostype'], platform) + + # Now for dylibs + dylib_info_raw = context.get("dylib_info_raw") + 
dylib_info = json.loads(self.decode_gdbremote_binary(dylib_info_raw)) + images = dylib_info['images'] + + image_info = None + for image in images: + if image['pathname'] != exe_path: + continue + image_info = image + break + + self.assertIsNotNone(image_info) + self.assertEquals(image['min_version_os_name'], platform) + + + @apple_simulator_test('iphone') + @debugserver_test + @skipIfDarwinEmbedded + def test_simulator_ostype_ios(self): + self.check_simulator_ostype(sdk='iphonesimulator', + platform='ios') + + @apple_simulator_test('appletv') + @debugserver_test + @skipIfDarwinEmbedded + def test_simulator_ostype_tvos(self): + self.check_simulator_ostype(sdk='appletvsimulator', + platform='tvos') + + @apple_simulator_test('watch') + @debugserver_test + @skipIfDarwinEmbedded + def test_simulator_ostype_watchos(self): + self.check_simulator_ostype(sdk='watchsimulator', + platform='watchos', arch='i386') Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAttach.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAttach.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAttach.py @@ -0,0 +1,67 @@ +from __future__ import print_function + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def attach_with_vAttach(self): + # Start the inferior, start the debug monitor, nothing is attached yet. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["sleep:60"]) + self.assertIsNotNone(procs) + + # Make sure the target process has been launched. 
+ inferior = procs.get("inferior") + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + self.assertTrue( + lldbgdbserverutils.process_is_running( + inferior.pid, True)) + + # Add attach packets. + self.test_sequence.add_log_lines([ + # Do the attach. + "read packet: $vAttach;{:x}#00".format(inferior.pid), + # Expect a stop notification from the attach. + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$", + "capture": {1: "stop_signal_hex"}}, + ], True) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. + pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, inferior.pid) + + @debugserver_test + def test_attach_with_vAttach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() + + @llgs_test + def test_attach_with_vAttach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAuxvSupport.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAuxvSupport.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAuxvSupport.py @@ -0,0 +1,220 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir 
= TestBase.compute_mydir(__file__) + + AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read" + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def has_auxv_support(self): + inferior_args = ["message:main entered", "sleep:5"] + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + # Don't do anything until we match the launched inferior main entry output. + # Then immediately interrupt the process. + # This prevents auxv data being asked for before it's ready and leaves + # us in a stopped state. + self.test_sequence.add_log_lines([ + # Start the inferior... + "read packet: $c#63", + # ... match output.... + {"type": "output_match", "regex": self.maybe_strict_output_regex( + r"message:main entered\r\n")}, + ], True) + # ... then interrupt. + self.add_interrupt_packets() + self.add_qSupported_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + features = self.parse_qSupported_response(context) + return self.AUXV_SUPPORT_FEATURE_NAME in features and features[ + self.AUXV_SUPPORT_FEATURE_NAME] == "+" + + def get_raw_auxv_data(self): + # Start up llgs and inferior, and check for auxv support. + if not self.has_auxv_support(): + self.skipTest("auxv data not supported") + + # Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target. + # Auxv is specified in terms of pairs of unsigned longs. + self.reset_test_sequence() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + proc_info = self.parse_process_info_response(context) + self.assertIsNotNone(proc_info) + self.assertTrue("ptrsize" in proc_info) + word_size = int(proc_info["ptrsize"]) + + OFFSET = 0 + LENGTH = 0x400 + + # Grab the auxv data. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $qXfer:auxv:read::{:x},{:x}:#00".format( + OFFSET, + LENGTH), + { + "direction": "send", + "regex": re.compile( + r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", + re.MULTILINE | re.DOTALL), + "capture": { + 1: "response_type", + 2: "content_raw"}}], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure we end up with all auxv data in one packet. + # FIXME don't assume it all comes back in one packet. + self.assertEqual(context.get("response_type"), "l") + + # Decode binary data. + content_raw = context.get("content_raw") + self.assertIsNotNone(content_raw) + return (word_size, self.decode_gdbremote_binary(content_raw)) + + def supports_auxv(self): + # When non-auxv platforms support llgs, skip the test on platforms + # that don't support auxv. + self.assertTrue(self.has_auxv_support()) + + # + # We skip the "supports_auxv" test on debugserver. The rest of the tests + # appropriately skip the auxv tests if the support flag is not present + # in the qSupported response, so the debugserver test bits are still there + # in case debugserver code one day does have auxv support and thus those + # tests don't get skipped. + # + + @llgs_test + def test_supports_auxv_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.supports_auxv() + + def auxv_data_is_correct_size(self): + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Ensure auxv data is a multiple of 2*word_size (there should be two + # unsigned long fields per auxv entry). 
+ self.assertEqual(len(auxv_data) % (2 * word_size), 0) + # print("auxv contains {} entries".format(len(auxv_data) / (2*word_size))) + + @debugserver_test + def test_auxv_data_is_correct_size_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_data_is_correct_size() + + @llgs_test + def test_auxv_data_is_correct_size_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_data_is_correct_size() + + def auxv_keys_look_valid(self): + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Grab endian. + self.reset_test_sequence() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) + self.assertIsNotNone(auxv_dict) + + # Verify keys look reasonable. + for auxv_key in auxv_dict: + self.assertTrue(auxv_key >= 1) + self.assertTrue(auxv_key <= 1000) + # print("auxv dict: {}".format(auxv_dict)) + + @debugserver_test + def test_auxv_keys_look_valid_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_keys_look_valid() + + @llgs_test + def test_auxv_keys_look_valid_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_keys_look_valid() + + def auxv_chunked_reads_work(self): + # Verify that multiple smaller offset,length reads of auxv data + # return the same data as a single larger read. + + # Grab the auxv data with a single large read here. + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Grab endian. 
+ self.reset_test_sequence() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) + self.assertIsNotNone(auxv_dict) + + iterated_auxv_data = self.read_binary_data_in_chunks( + "qXfer:auxv:read::", 2 * word_size) + self.assertIsNotNone(iterated_auxv_data) + + auxv_dict_iterated = self.build_auxv_dict( + endian, word_size, iterated_auxv_data) + self.assertIsNotNone(auxv_dict_iterated) + + # Verify both types of data collection returned same content. + self.assertEqual(auxv_dict_iterated, auxv_dict) + + @debugserver_test + def test_auxv_chunked_reads_work_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_chunked_reads_work() + + @llgs_test + def test_auxv_chunked_reads_work_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_chunked_reads_work() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExitCode.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExitCode.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExitCode.py @@ -0,0 +1,127 @@ +from __future__ import print_function + +# lldb test suite imports +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import TestBase + +# gdb-remote-specific imports +import lldbgdbserverutils +from gdbremote_testcase import GdbRemoteTestCaseBase + + +class TestGdbRemoteExitCode(GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + FAILED_LAUNCH_CODE = "E08" + + def get_launch_fail_reason(self): + self.reset_test_sequence() + 
self.test_sequence.add_log_lines( + ["read packet: $qLaunchSuccess#00"], + True) + self.test_sequence.add_log_lines( + [{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "launch_result"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + return context.get("launch_result")[1:] + + def start_inferior(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["read packet: %s" % lldbgdbserverutils.build_gdbremote_A_packet( + launch_args)], + True) + self.test_sequence.add_log_lines( + [{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "A_result"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + launch_result = context.get("A_result") + self.assertIsNotNone(launch_result) + if launch_result == self.FAILED_LAUNCH_CODE: + fail_reason = self.get_launch_fail_reason() + self.fail("failed to launch inferior: " + fail_reason) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_start_inferior_debugserver(self): + self.init_debugserver_test() + self.build() + self.start_inferior() + + @llgs_test + def test_start_inferior_llgs(self): + self.init_llgs_test() + self.build() + self.start_inferior() + + def inferior_exit_0(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W00#00"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_inferior_exit_0_debugserver(self): + 
self.init_debugserver_test() + self.build() + self.inferior_exit_0() + + @llgs_test + def test_inferior_exit_0_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_exit_0() + + def inferior_exit_42(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + RETVAL = 42 + + # build launch args + launch_args += ["retval:%d" % RETVAL] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W{0:02x}#00".format(RETVAL)], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_inferior_exit_42_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_exit_42() + + @llgs_test + def test_inferior_exit_42_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_exit_42() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py @@ -0,0 +1,162 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteExpeditedRegisters( + gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + + def gather_expedited_registers(self): + # Setup the stub and set the gdb remote command stream. 
+ procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) + self.test_sequence.add_log_lines([ + # Start up the inferior. + "read packet: $c#63", + # Immediately tell it to stop. We want to see what it reports. + "read packet: {}".format(chr(3)), + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": {1: "stop_result", + 2: "key_vals_text"}}, + ], True) + + # Run the gdb remote command stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Pull out expedited registers. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + + expedited_registers = self.extract_registers_from_stop_notification( + key_vals_text) + self.assertIsNotNone(expedited_registers) + + return expedited_registers + + def stop_notification_contains_generic_register( + self, generic_register_name): + # Generate a stop reply, parse out expedited registers from stop + # notification. + expedited_registers = self.gather_expedited_registers() + self.assertIsNotNone(expedited_registers) + self.assertTrue(len(expedited_registers) > 0) + + # Gather target register infos. + reg_infos = self.gather_register_infos() + + # Find the generic register. + reg_info = self.find_generic_register_with_name( + reg_infos, generic_register_name) + self.assertIsNotNone(reg_info) + + # Ensure the expedited registers contained it. + self.assertTrue(reg_info["lldb_register_index"] in expedited_registers) + # print("{} reg_info:{}".format(generic_register_name, reg_info)) + + def stop_notification_contains_any_registers(self): + # Generate a stop reply, parse out expedited registers from stop + # notification. + expedited_registers = self.gather_expedited_registers() + # Verify we have at least one expedited register. 
+ self.assertTrue(len(expedited_registers) > 0) + + @debugserver_test + def test_stop_notification_contains_any_registers_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_any_registers() + + @llgs_test + def test_stop_notification_contains_any_registers_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_any_registers() + + def stop_notification_contains_no_duplicate_registers(self): + # Generate a stop reply, parse out expedited registers from stop + # notification. + expedited_registers = self.gather_expedited_registers() + # Verify no expedited register was specified multiple times. + for (reg_num, value) in list(expedited_registers.items()): + if (isinstance(value, list)) and (len(value) > 0): + self.fail( + "expedited register number {} specified more than once ({} times)".format( + reg_num, len(value))) + + @debugserver_test + def test_stop_notification_contains_no_duplicate_registers_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_no_duplicate_registers() + + @llgs_test + def test_stop_notification_contains_no_duplicate_registers_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_no_duplicate_registers() + + def stop_notification_contains_pc_register(self): + self.stop_notification_contains_generic_register("pc") + + @debugserver_test + def test_stop_notification_contains_pc_register_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_pc_register() + + @llgs_test + def test_stop_notification_contains_pc_register_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_pc_register() + + # powerpc64 
has no FP register + @skipIf(triple='^powerpc64') + def stop_notification_contains_fp_register(self): + self.stop_notification_contains_generic_register("fp") + + @debugserver_test + def test_stop_notification_contains_fp_register_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_fp_register() + + @llgs_test + def test_stop_notification_contains_fp_register_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_fp_register() + + def stop_notification_contains_sp_register(self): + self.stop_notification_contains_generic_register("sp") + + @debugserver_test + def test_stop_notification_contains_sp_register_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_sp_register() + + @llgs_test + def test_stop_notification_contains_sp_register_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_sp_register() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteHostInfo.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteHostInfo.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteHostInfo.py @@ -0,0 +1,131 @@ +from __future__ import print_function + +# lldb test suite imports +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import TestBase + +# gdb-remote-specific imports +import lldbgdbserverutils +from gdbremote_testcase import GdbRemoteTestCaseBase + + +class TestGdbRemoteHostInfo(GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + KNOWN_HOST_INFO_KEYS = set([ + "arch", + "cputype", + "cpusubtype", + "distribution_id", + "endian", + "hostname", + "ostype", + "os_build", + 
"os_kernel", + "os_version", + "ptrsize", + "triple", + "vendor", + "watchpoint_exceptions_received", + "default_packet_timeout", + ]) + + DARWIN_REQUIRED_HOST_INFO_KEYS = set([ + "cputype", + "cpusubtype", + "endian", + "ostype", + "ptrsize", + "vendor", + "watchpoint_exceptions_received" + ]) + + def add_host_info_collection_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qHostInfo#9b", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "host_info_raw"}}], + True) + + def parse_host_info_response(self, context): + # Ensure we have a host info response. + self.assertIsNotNone(context) + host_info_raw = context.get("host_info_raw") + self.assertIsNotNone(host_info_raw) + + # Pull out key:value; pairs. + host_info_dict = {match.group(1): match.group(2) + for match in re.finditer(r"([^:]+):([^;]+);", + host_info_raw)} + + import pprint + print("\nqHostInfo response:") + pprint.pprint(host_info_dict) + + # Validate keys are known. + for (key, val) in list(host_info_dict.items()): + self.assertTrue(key in self.KNOWN_HOST_INFO_KEYS, + "unknown qHostInfo key: " + key) + self.assertIsNotNone(val) + + # Return the key:val pairs. + return host_info_dict + + def get_qHostInfo_response(self): + # Launch the debug monitor stub, attaching to the inferior. + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + self.add_no_ack_remote_stream() + + # Request qHostInfo and get response + self.add_host_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse qHostInfo response. 
+ host_info = self.parse_host_info_response(context) + self.assertIsNotNone(host_info) + self.assertGreater(len(host_info), 0, "qHostInfo should have returned " + "at least one key:val pair.") + return host_info + + def validate_darwin_minimum_host_info_keys(self, host_info_dict): + self.assertIsNotNone(host_info_dict) + missing_keys = [key for key in self.DARWIN_REQUIRED_HOST_INFO_KEYS + if key not in host_info_dict] + self.assertEquals(0, len(missing_keys), + "qHostInfo is missing the following required " + "keys: " + str(missing_keys)) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_qHostInfo_returns_at_least_one_key_val_pair_debugserver(self): + self.init_debugserver_test() + self.build() + self.get_qHostInfo_response() + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @llgs_test + def test_qHostInfo_returns_at_least_one_key_val_pair_llgs(self): + self.init_llgs_test() + self.build() + self.get_qHostInfo_response() + + @skipUnlessDarwin + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_qHostInfo_contains_darwin_required_keys_debugserver(self): + self.init_debugserver_test() + self.build() + host_info_dict = self.get_qHostInfo_response() + self.validate_darwin_minimum_host_info_keys(host_info_dict) + + @skipUnlessDarwin + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @llgs_test + def test_qHostInfo_contains_darwin_required_keys_llgs(self): + self.init_llgs_test() + self.build() + host_info_dict = self.get_qHostInfo_response() + self.validate_darwin_minimum_host_info_keys(host_info_dict) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteKill.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteKill.py +++ 
lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteKill.py @@ -0,0 +1,59 @@ +from __future__ import print_function + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteKill(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + + def attach_commandline_kill_after_initial_stop(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines([ + "read packet: $k#6b", + {"direction": "send", "regex": r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}"}, + ], True) + + if self.stub_sends_two_stop_notifications_on_kill: + # Add an expectation for a second X result for stubs that send two + # of these. + self.test_sequence.add_log_lines([ + {"direction": "send", "regex": r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}"}, + ], True) + + self.expect_gdbremote_sequence() + + # Wait a moment for completed and now-detached inferior process to + # clear. + time.sleep(1) + + if not lldb.remote_platform: + # Process should be dead now. Reap results. + poll_result = procs["inferior"].poll() + self.assertIsNotNone(poll_result) + + # Where possible, verify at the system level that the process is not + # running. 
+ self.assertFalse( + lldbgdbserverutils.process_is_running( + procs["inferior"].pid, False)) + + @debugserver_test + def test_attach_commandline_kill_after_initial_stop_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_kill_after_initial_stop() + + @llgs_test + def test_attach_commandline_kill_after_initial_stop_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_kill_after_initial_stop() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteModuleInfo.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteModuleInfo.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteModuleInfo.py @@ -0,0 +1,44 @@ +from __future__ import print_function + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.support import seven +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteModuleInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def module_info(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + info = self.parse_process_info_response(context) + + self.test_sequence.add_log_lines([ + 'read packet: $jModulesInfo:[{"file":"%s","triple":"%s"}]]#00' % ( + lldbutil.append_to_process_working_directory(self, "a.out"), + seven.unhexlify(info["triple"])), + {"direction": "send", + "regex": r'^\$\[{(.*)}\]\]#[0-9A-Fa-f]{2}', + "capture": {1: "spec"}}, + ], True) + + context = self.expect_gdbremote_sequence() + spec = context.get("spec") + self.assertRegexpMatches(spec, '"file_path":".*"') + self.assertRegexpMatches(spec, '"file_offset":\d+') + 
self.assertRegexpMatches(spec, '"file_size":\d+') + self.assertRegexpMatches(spec, '"triple":"\w*-\w*-.*"') + self.assertRegexpMatches(spec, '"uuid":"[A-Fa-f0-9]+"') + + @llgs_test + def test_module_info(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.module_info() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteProcessInfo.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteProcessInfo.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteProcessInfo.py @@ -0,0 +1,211 @@ +from __future__ import print_function + + +import sys + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def qProcessInfo_returns_running_process(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id looks reasonable. + pid_text = process_info.get("pid") + self.assertIsNotNone(pid_text) + pid = int(pid_text, base=16) + self.assertNotEqual(0, pid) + + # If possible, verify that the process is running. 
+ self.assertTrue(lldbgdbserverutils.process_is_running(pid, True)) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_returns_running_process_debugserver(self): + self.init_debugserver_test() + self.build() + self.qProcessInfo_returns_running_process() + + @llgs_test + def test_qProcessInfo_returns_running_process_llgs(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_returns_running_process() + + def attach_commandline_qProcessInfo_reports_correct_pid(self): + procs = self.prep_debug_monitor_and_inferior() + self.assertIsNotNone(procs) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence(timeout_seconds=8) + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. + pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, procs["inferior"].pid) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_attach_commandline_qProcessInfo_reports_correct_pid_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_qProcessInfo_reports_correct_pid() + + @llgs_test + def test_attach_commandline_qProcessInfo_reports_correct_pid_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_qProcessInfo_reports_correct_pid() + + def qProcessInfo_reports_valid_endian(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info 
= self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id looks reasonable. + endian = process_info.get("endian") + self.assertIsNotNone(endian) + self.assertTrue(endian in ["little", "big", "pdp"]) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_reports_valid_endian_debugserver(self): + self.init_debugserver_test() + self.build() + self.qProcessInfo_reports_valid_endian() + + @llgs_test + def test_qProcessInfo_reports_valid_endian_llgs(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_reports_valid_endian() + + def qProcessInfo_contains_keys(self, expected_key_set): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the expected keys are present and non-None within the process + # info. 
+ missing_key_set = set() + for expected_key in expected_key_set: + if expected_key not in process_info: + missing_key_set.add(expected_key) + + self.assertEqual( + missing_key_set, + set(), + "the listed keys are missing in the qProcessInfo result") + + def qProcessInfo_does_not_contain_keys(self, absent_key_set): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the unexpected keys are not present + unexpected_key_set = set() + for unexpected_key in absent_key_set: + if unexpected_key in process_info: + unexpected_key_set.add(unexpected_key) + + self.assertEqual( + unexpected_key_set, + set(), + "the listed keys were present but unexpected in qProcessInfo result") + + @skipUnlessDarwin + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_contains_cputype_cpusubtype_debugserver_darwin(self): + self.init_debugserver_test() + self.build() + self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype'])) + + @skipUnlessDarwin + @llgs_test + def test_qProcessInfo_contains_cputype_cpusubtype_llgs_darwin(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype'])) + + @skipUnlessPlatform(["linux"]) + @llgs_test + def test_qProcessInfo_contains_triple_llgs_linux(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_contains_keys(set(['triple'])) + + @skipUnlessDarwin + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_does_not_contain_triple_debugserver_darwin(self): + self.init_debugserver_test() + self.build() + # We don't expect to see triple on darwin. 
If we do, we'll prefer triple + # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup + # for the remote Host and Process. + self.qProcessInfo_does_not_contain_keys(set(['triple'])) + + @skipUnlessDarwin + @llgs_test + def test_qProcessInfo_does_not_contain_triple_llgs_darwin(self): + self.init_llgs_test() + self.build() + # We don't expect to see triple on darwin. If we do, we'll prefer triple + # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup + # for the remote Host and Process. + self.qProcessInfo_does_not_contain_keys(set(['triple'])) + + @skipUnlessPlatform(["linux"]) + @llgs_test + def test_qProcessInfo_does_not_contain_cputype_cpusubtype_llgs_linux(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype'])) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteRegisterState.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteRegisterState.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteRegisterState.py @@ -0,0 +1,128 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteRegisterState(gdbremote_testcase.GdbRemoteTestCaseBase): + """Test QSaveRegisterState/QRestoreRegisterState support.""" + + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def grp_register_save_restore_works(self, with_suffix): + # Start up the process, use thread suffix, grab main thread id. 
+ inferior_args = ["message:main entered", "sleep:5"] + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + self.add_process_info_collection_packets() + self.add_register_info_collection_packets() + if with_suffix: + self.add_thread_suffix_request_packets() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info. + process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Gather register info. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + # Pull out the register infos that we think we can bit flip + # successfully. + gpr_reg_infos = [ + reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] + self.assertTrue(len(gpr_reg_infos) > 0) + + # Gather thread info. + if with_suffix: + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + thread_id = threads[0] + self.assertIsNotNone(thread_id) + # print("Running on thread: 0x{:x}".format(thread_id)) + else: + thread_id = None + + # Save register state. + self.reset_test_sequence() + self.add_QSaveRegisterState_packets(thread_id) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + (success, state_id) = self.parse_QSaveRegisterState_response(context) + self.assertTrue(success) + self.assertIsNotNone(state_id) + # print("saved register state id: {}".format(state_id)) + + # Remember initial register values. + initial_reg_values = self.read_register_values( + gpr_reg_infos, endian, thread_id=thread_id) + # print("initial_reg_values: {}".format(initial_reg_values)) + + # Flip gpr register values. 
+ (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value( + gpr_reg_infos, endian, thread_id=thread_id) + # print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes)) + self.assertTrue(successful_writes > 0) + + flipped_reg_values = self.read_register_values( + gpr_reg_infos, endian, thread_id=thread_id) + # print("flipped_reg_values: {}".format(flipped_reg_values)) + + # Restore register values. + self.reset_test_sequence() + self.add_QRestoreRegisterState_packets(state_id, thread_id) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify registers match initial register values. + final_reg_values = self.read_register_values( + gpr_reg_infos, endian, thread_id=thread_id) + # print("final_reg_values: {}".format(final_reg_values)) + self.assertIsNotNone(final_reg_values) + self.assertEqual(final_reg_values, initial_reg_values) + + @debugserver_test + def test_grp_register_save_restore_works_with_suffix_debugserver(self): + USE_THREAD_SUFFIX = True + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @llgs_test + def test_grp_register_save_restore_works_with_suffix_llgs(self): + USE_THREAD_SUFFIX = True + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @debugserver_test + def test_grp_register_save_restore_works_no_suffix_debugserver(self): + USE_THREAD_SUFFIX = False + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @llgs_test + def test_grp_register_save_restore_works_no_suffix_llgs(self): + USE_THREAD_SUFFIX = False + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) Index: 
lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py @@ -0,0 +1,41 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_single_step_only_steps_one_instruction_with_s_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="s") + + @llgs_test + @expectedFailureAndroid( + bugnumber="llvm.org/pr24739", + archs=[ + "arm", + "aarch64"]) + @expectedFailureAll( + oslist=["linux"], + archs=[ + "arm", + "aarch64"], + bugnumber="llvm.org/pr24739") + @skipIf(triple='^mips') + def test_single_step_only_steps_one_instruction_with_s_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="s") Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py @@ -0,0 +1,303 @@ +from __future__ import print_function + +import json +import re + +import gdbremote_testcase +from 
lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + +class TestGdbRemoteThreadsInStopReply( + gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [ + "read packet: $QListThreadsInStopReply#21", + "send packet: $OK#00", + ] + + def gather_stop_reply_fields(self, post_startup_log_lines, thread_count, + field_names): + # Set up the inferior args. + inferior_args = [] + for i in range(thread_count - 1): + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + # Assumes test_sequence has anything added needed to setup the initial state. + # (Like optionally enabling QThreadsInStopReply.) + if post_startup_log_lines: + self.test_sequence.add_log_lines(post_startup_log_lines, True) + self.test_sequence.add_log_lines([ + "read packet: $c#63" + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + hw_info = self.parse_hw_info(context) + + # Give threads time to start up, then break. + time.sleep(1) + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: {}".format( + chr(3)), + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Wait until all threads have started. + threads = self.wait_for_thread_count(thread_count, timeout_seconds=3) + self.assertIsNotNone(threads) + self.assertEqual(len(threads), thread_count) + + # Run, then stop the process, grab the stop reply content. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines(["read packet: $c#63", + "read packet: {}".format(chr(3)), + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": {1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse the stop reply contents. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + kv_dict = self.parse_key_val_dict(key_vals_text) + self.assertIsNotNone(kv_dict) + + result = dict(); + result["pc_register"] = hw_info["pc_register"] + result["little_endian"] = hw_info["little_endian"] + for key_field in field_names: + result[key_field] = kv_dict.get(key_field) + + return result + + def gather_stop_reply_threads(self, post_startup_log_lines, thread_count): + # Pull out threads from stop response. + stop_reply_threads_text = self.gather_stop_reply_fields( + post_startup_log_lines, thread_count, ["threads"])["threads"] + if stop_reply_threads_text: + return [int(thread_id, 16) + for thread_id in stop_reply_threads_text.split(",")] + else: + return [] + + def gather_stop_reply_pcs(self, post_startup_log_lines, thread_count): + results = self.gather_stop_reply_fields( post_startup_log_lines, + thread_count, ["threads", "thread-pcs"]) + if not results: + return [] + + threads_text = results["threads"] + pcs_text = results["thread-pcs"] + thread_ids = threads_text.split(",") + pcs = pcs_text.split(",") + self.assertTrue(len(thread_ids) == len(pcs)) + + thread_pcs = dict() + for i in range(0, len(pcs)): + thread_pcs[int(thread_ids[i], 16)] = pcs[i] + + result = dict() + result["thread_pcs"] = thread_pcs + result["pc_register"] = results["pc_register"] + result["little_endian"] = results["little_endian"] + return result + + def switch_endian(self, egg): + return "".join(reversed(re.findall("..", egg))) + + def parse_hw_info(self, context): + self.assertIsNotNone(context) + process_info = 
self.parse_process_info_response(context) + endian = process_info.get("endian") + reg_info = self.parse_register_info_packets(context) + (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_info) + + hw_info = dict() + hw_info["pc_register"] = pc_lldb_reg_index + hw_info["little_endian"] = (endian == "little") + return hw_info + + def gather_threads_info_pcs(self, pc_register, little_endian): + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $jThreadsInfo#c1", + { + "direction": "send", + "regex": r"^\$(.*)#[0-9a-fA-F]{2}$", + "capture": { + 1: "threads_info"}}, + ], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + threads_info = context.get("threads_info") + register = str(pc_register) + # The jThreadsInfo response is not valid JSON data, so we have to + # clean it up first. + jthreads_info = json.loads(re.sub(r"}]", "}", threads_info)) + thread_pcs = dict() + for thread_info in jthreads_info: + tid = thread_info["tid"] + pc = thread_info["registers"][register] + thread_pcs[tid] = self.switch_endian(pc) if little_endian else pc + + return thread_pcs + + def QListThreadsInStopReply_supported(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_QListThreadsInStopReply_supported_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.QListThreadsInStopReply_supported() + + @llgs_test + def test_QListThreadsInStopReply_supported_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.QListThreadsInStopReply_supported() + + def stop_reply_reports_multiple_threads(self, thread_count): + # Gather threads from stop 
notification when QThreadsInStopReply is + # enabled. + stop_reply_threads = self.gather_stop_reply_threads( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + self.assertEqual(len(stop_reply_threads), thread_count) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_stop_reply_reports_multiple_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_multiple_threads(5) + + @llgs_test + def test_stop_reply_reports_multiple_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_multiple_threads(5) + + def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is not + # enabled. + stop_reply_threads = self.gather_stop_reply_threads(None, thread_count) + self.assertEqual(len(stop_reply_threads), 0) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.no_QListThreadsInStopReply_supplies_no_threads(5) + + @llgs_test + def test_no_QListThreadsInStopReply_supplies_no_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.no_QListThreadsInStopReply_supplies_no_threads(5) + + def stop_reply_reports_correct_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is + # enabled. + stop_reply_threads = self.gather_stop_reply_threads( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + self.assertEqual(len(stop_reply_threads), thread_count) + + # Gather threads from q{f,s}ThreadInfo. 
+ self.reset_test_sequence() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + self.assertEqual(len(threads), thread_count) + + # Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads + for tid in threads: + self.assertTrue(tid in stop_reply_threads) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_stop_reply_reports_correct_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_correct_threads(5) + + @llgs_test + def test_stop_reply_reports_correct_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_correct_threads(5) + + def stop_reply_contains_thread_pcs(self, thread_count): + results = self.gather_stop_reply_pcs( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + stop_reply_pcs = results["thread_pcs"] + pc_register = results["pc_register"] + little_endian = results["little_endian"] + self.assertEqual(len(stop_reply_pcs), thread_count) + + threads_info_pcs = self.gather_threads_info_pcs(pc_register, + little_endian) + + self.assertEqual(len(threads_info_pcs), thread_count) + for thread_id in stop_reply_pcs: + self.assertTrue(thread_id in threads_info_pcs) + self.assertTrue(int(stop_reply_pcs[thread_id], 16) + == int(threads_info_pcs[thread_id], 16)) + + @llgs_test + def test_stop_reply_contains_thread_pcs_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_contains_thread_pcs(5) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_stop_reply_contains_thread_pcs_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() 
+ self.stop_reply_contains_thread_pcs(5) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py @@ -0,0 +1,182 @@ +from __future__ import print_function + + +import sys + +import unittest2 +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemote_qThreadStopInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + THREAD_COUNT = 5 + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + @skipIfDarwinEmbedded # + def gather_stop_replies_via_qThreadStopInfo(self, thread_count): + # Set up the inferior args. + inferior_args = [] + for i in range(thread_count - 1): + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + # Assumes test_sequence has anything added needed to setup the initial state. + # (Like optionally enabling QThreadsInStopReply.) + self.test_sequence.add_log_lines([ + "read packet: $c#63" + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Give threads time to start up, then break. + time.sleep(1) + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: {}".format( + chr(3)), + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Wait until all threads have started. 
+ threads = self.wait_for_thread_count(thread_count, timeout_seconds=3) + self.assertIsNotNone(threads) + self.assertEqual(len(threads), thread_count) + + # Grab stop reply for each thread via qThreadStopInfo{tid:hex}. + stop_replies = {} + thread_dicts = {} + for thread in threads: + # Run the qThreadStopInfo command. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $qThreadStopInfo{:x}#00".format(thread), + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse stop reply contents. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + kv_dict = self.parse_key_val_dict(key_vals_text) + self.assertIsNotNone(kv_dict) + + # Verify there is a thread and that it matches the expected thread + # id. + kv_thread = kv_dict.get("thread") + self.assertIsNotNone(kv_thread) + kv_thread_id = int(kv_thread, 16) + self.assertEqual(kv_thread_id, thread) + + # Grab the stop id reported. + stop_result_text = context.get("stop_result") + self.assertIsNotNone(stop_result_text) + stop_replies[kv_thread_id] = int(stop_result_text, 16) + + # Hang on to the key-val dictionary for the thread. 
+ thread_dicts[kv_thread_id] = kv_dict + + return (stop_replies, thread_dicts) + + def qThreadStopInfo_works_for_multiple_threads(self, thread_count): + (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertEqual(len(stop_replies), thread_count) + + @debugserver_test + def test_qThreadStopInfo_works_for_multiple_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) + + @llgs_test + def test_qThreadStopInfo_works_for_multiple_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) + + def qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt( + self, thread_count): + (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertIsNotNone(stop_replies) + + no_stop_reason_count = sum( + 1 for stop_reason in list( + stop_replies.values()) if stop_reason == 0) + with_stop_reason_count = sum( + 1 for stop_reason in list( + stop_replies.values()) if stop_reason != 0) + + # All but one thread should report no stop reason. + self.assertEqual(no_stop_reason_count, thread_count - 1) + + # Only one thread should indicate a stop reason. 
+ self.assertEqual(with_stop_reason_count, 1) + + @debugserver_test + def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt( + self.THREAD_COUNT) + + @llgs_test + def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt( + self.THREAD_COUNT) + + def qThreadStopInfo_has_valid_thread_names( + self, thread_count, expected_thread_name): + (_, thread_dicts) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertIsNotNone(thread_dicts) + + for thread_dict in list(thread_dicts.values()): + name = thread_dict.get("name") + self.assertIsNotNone(name) + self.assertEqual(name, expected_thread_name) + + @unittest2.skip("MacOSX doesn't have a default thread name") + @debugserver_test + def test_qThreadStopInfo_has_valid_thread_names_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") + + # test requires OS with set, equal thread names by default. 
+ @skipUnlessPlatform(["linux"]) + @llgs_test + def test_qThreadStopInfo_has_valid_thread_names_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vCont.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vCont.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vCont.py @@ -0,0 +1,159 @@ +from __future__ import print_function + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemote_vCont(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def vCont_supports_mode(self, mode, inferior_args=None): + # Setup the stub and set the gdb remote command stream. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + self.add_vCont_query_packets() + + # Run the gdb remote command stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Pull out supported modes. + supported_vCont_modes = self.parse_vCont_query_response(context) + self.assertIsNotNone(supported_vCont_modes) + + # Verify we support the given mode. 
+ self.assertTrue(mode in supported_vCont_modes) + + def vCont_supports_c(self): + self.vCont_supports_mode("c") + + def vCont_supports_C(self): + self.vCont_supports_mode("C") + + def vCont_supports_s(self): + self.vCont_supports_mode("s") + + def vCont_supports_S(self): + self.vCont_supports_mode("S") + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_c_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_c() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_c_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_c() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_C_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_C() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_C_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_C() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_s_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_s() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_s_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_s() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_S_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_S() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], 
bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_S_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_S() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="vCont;s") + + @llgs_test + @expectedFailureAndroid( + bugnumber="llvm.org/pr24739", + archs=[ + "arm", + "aarch64"]) + @expectedFailureAll( + oslist=["linux"], + archs=[ + "arm", + "aarch64"], + bugnumber="llvm.org/pr24739") + @skipIf(triple='^mips') + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="vCont;s") + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_single_step_only_steps_one_instruction_with_vCont_s_thread_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=False, step_instruction="vCont;s:{thread}") + + @llgs_test + @expectedFailureAndroid( + bugnumber="llvm.org/pr24739", + archs=[ + "arm", + "aarch64"]) + @expectedFailureAll( + oslist=["linux"], + archs=[ + "arm", + "aarch64"], + bugnumber="llvm.org/pr24739") + @skipIf(triple='^mips') + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + def test_single_step_only_steps_one_instruction_with_vCont_s_thread_llgs( + self): + self.init_llgs_test() + self.build() + 
self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=False, step_instruction="vCont;s:{thread}") Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py @@ -0,0 +1,1570 @@ +""" +Test case for testing the gdbremote protocol. + +Tests run against debugserver and lldb-server (llgs). +lldb-server tests run where the lldb-server exe is +available. + +This class will be broken into smaller test case classes by +gdb remote packet functional areas. For now it contains +the initial set of tests implemented. +""" + +from __future__ import division, print_function + + +import unittest2 +import gdbremote_testcase +import lldbgdbserverutils +import platform +import signal +from lldbsuite.support import seven +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test.lldbdwarf import * +from lldbsuite.test import lldbutil + + +class LldbGdbServerTestCase(gdbremote_testcase.GdbRemoteTestCaseBase, DwarfOpcodeParser): + + mydir = TestBase.compute_mydir(__file__) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_exe_starts_debugserver(self): + self.init_debugserver_test() + server = self.connect_to_debug_monitor() + + @llgs_test + def test_exe_starts_llgs(self): + self.init_llgs_test() + server = self.connect_to_debug_monitor() + + def start_no_ack_mode(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_start_no_ack_mode_debugserver(self): + 
self.init_debugserver_test() + self.start_no_ack_mode() + + @llgs_test + def test_start_no_ack_mode_llgs(self): + self.init_llgs_test() + self.start_no_ack_mode() + + def thread_suffix_supported(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["lldb-server < 26> read packet: $QThreadSuffixSupported#e4", + "lldb-server < 6> send packet: $OK#9a"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_thread_suffix_supported_debugserver(self): + self.init_debugserver_test() + self.thread_suffix_supported() + + @llgs_test + def test_thread_suffix_supported_llgs(self): + self.init_llgs_test() + self.thread_suffix_supported() + + def list_threads_in_stop_reply_supported(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["lldb-server < 27> read packet: $QListThreadsInStopReply#21", + "lldb-server < 6> send packet: $OK#9a"], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_list_threads_in_stop_reply_supported_debugserver(self): + self.init_debugserver_test() + self.list_threads_in_stop_reply_supported() + + @llgs_test + def test_list_threads_in_stop_reply_supported_llgs(self): + self.init_llgs_test() + self.list_threads_in_stop_reply_supported() + + def c_packet_works(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $c#63", + "send packet: $W00#00"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded 
# lldb-server tests not updated to work on ios etc yet + def test_c_packet_works_debugserver(self): + self.init_debugserver_test() + self.build() + self.c_packet_works() + + @llgs_test + def test_c_packet_works_llgs(self): + self.init_llgs_test() + self.build() + self.c_packet_works() + + def inferior_print_exit(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args += ["hello, world"] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"hello, world\r\n")}, + "send packet: $W00#00"], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_inferior_print_exit_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_print_exit() + + @llgs_test + @expectedFlakeyLinux("llvm.org/pr25652") + def test_inferior_print_exit_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_print_exit() + + def first_launch_stop_reply_thread_matches_first_qC(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args += ["hello, world"] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines(["read packet: $qC#00", + {"direction": "send", + "regex": r"^\$QC([0-9a-fA-F]+)#", + "capture": {1: "thread_id"}}, + "read packet: $?#00", + {"direction": "send", + "regex": r"^\$T[0-9a-fA-F]{2}thread:([0-9a-fA-F]+)", + "expect_captures": {1: "thread_id"}}], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests 
not updated to work on ios etc yet + def test_first_launch_stop_reply_thread_matches_first_qC_debugserver(self): + self.init_debugserver_test() + self.build() + self.first_launch_stop_reply_thread_matches_first_qC() + + @llgs_test + def test_first_launch_stop_reply_thread_matches_first_qC_llgs(self): + self.init_llgs_test() + self.build() + self.first_launch_stop_reply_thread_matches_first_qC() + + def attach_commandline_continue_app_exits(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W00#00"], + True) + self.expect_gdbremote_sequence() + + # Wait a moment for completed and now-detached inferior process to + # clear. + time.sleep(1) + + if not lldb.remote_platform: + # Process should be dead now. Reap results. + poll_result = procs["inferior"].poll() + self.assertIsNotNone(poll_result) + + # Where possible, verify at the system level that the process is not + # running. + self.assertFalse( + lldbgdbserverutils.process_is_running( + procs["inferior"].pid, False)) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_attach_commandline_continue_app_exits_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_continue_app_exits() + + @llgs_test + def test_attach_commandline_continue_app_exits_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_continue_app_exits() + + def qRegisterInfo_returns_one_valid_result(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $qRegisterInfo0#00", + {"direction": "send", "regex": 
r"^\$(.+);#[0-9A-Fa-f]{2}", "capture": {1: "reginfo_0"}}], + True) + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + reg_info_packet = context.get("reginfo_0") + self.assertIsNotNone(reg_info_packet) + self.assert_valid_reg_info( + lldbgdbserverutils.parse_reg_info_response(reg_info_packet)) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_returns_one_valid_result_debugserver(self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_returns_one_valid_result() + + @llgs_test + def test_qRegisterInfo_returns_one_valid_result_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_returns_one_valid_result() + + def qRegisterInfo_returns_all_valid_results(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream. + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Validate that each register info returned validates. 
+ for reg_info in self.parse_register_info_packets(context): + self.assert_valid_reg_info(reg_info) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_returns_all_valid_results_debugserver(self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_returns_all_valid_results() + + @llgs_test + def test_qRegisterInfo_returns_all_valid_results_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_returns_all_valid_results() + + def qRegisterInfo_contains_required_generics(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all generic registers found. + generic_regs = { + reg_info['generic']: 1 for reg_info in reg_infos if 'generic' in reg_info} + + # Ensure we have a program counter register. + self.assertTrue('pc' in generic_regs) + + # Ensure we have a frame pointer register. PPC64le's FP is the same as SP + if self.getArchitecture() != 'powerpc64le': + self.assertTrue('fp' in generic_regs) + + # Ensure we have a stack pointer register. + self.assertTrue('sp' in generic_regs) + + # Ensure we have a flags register. 
+ self.assertTrue('flags' in generic_regs) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_contains_required_generics_debugserver(self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_contains_required_generics() + + @llgs_test + def test_qRegisterInfo_contains_required_generics_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_contains_required_generics() + + def qRegisterInfo_contains_at_least_one_register_set(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all register sets found. 
+ register_sets = { + reg_info['set']: 1 for reg_info in reg_infos if 'set' in reg_info} + self.assertTrue(len(register_sets) >= 1) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_contains_at_least_one_register_set_debugserver( + self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_contains_at_least_one_register_set() + + @llgs_test + def test_qRegisterInfo_contains_at_least_one_register_set_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_contains_at_least_one_register_set() + + def targetHasAVX(self): + triple = self.dbg.GetSelectedPlatform().GetTriple() + + # TODO other platforms, please implement this function + if not re.match(".*-.*-linux", triple): + return True + + # Need to do something different for non-Linux/Android targets + if lldb.remote_platform: + self.runCmd('platform get-file "/proc/cpuinfo" "cpuinfo"') + cpuinfo_path = "cpuinfo" + self.addTearDownHook(lambda: os.unlink("cpuinfo")) + else: + cpuinfo_path = "/proc/cpuinfo" + + f = open(cpuinfo_path, 'r') + cpuinfo = f.read() + f.close() + return " avx " in cpuinfo + + def qRegisterInfo_contains_avx_registers(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all generics found. 
+ register_sets = { + reg_info['set']: 1 for reg_info in reg_infos if 'set' in reg_info} + self.assertEqual( + self.targetHasAVX(), + "Advanced Vector Extensions" in register_sets) + + @llgs_test + def test_qRegisterInfo_contains_avx_registers_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_contains_avx_registers() + + def qThreadInfo_contains_thread(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_threadinfo_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather threadinfo entries. + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + # We should have exactly one thread. + self.assertEqual(len(threads), 1) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_contains_thread_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_contains_thread() + + @llgs_test + def test_qThreadInfo_contains_thread_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_contains_thread() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_contains_thread_attach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_contains_thread() + + @llgs_test + def test_qThreadInfo_contains_thread_attach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_contains_thread() + + def qThreadInfo_matches_qC(self): + procs = self.prep_debug_monitor_and_inferior() + + self.add_threadinfo_collection_packets() + self.test_sequence.add_log_lines( + ["read packet: $qC#00", + {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}} 
+ ], True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather threadinfo entries. + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + # We should have exactly one thread from threadinfo. + self.assertEqual(len(threads), 1) + + # We should have a valid thread_id from $QC. + QC_thread_id_hex = context.get("thread_id") + self.assertIsNotNone(QC_thread_id_hex) + QC_thread_id = int(QC_thread_id_hex, 16) + + # Those two should be the same. + self.assertEqual(threads[0], QC_thread_id) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_matches_qC_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_matches_qC() + + @llgs_test + def test_qThreadInfo_matches_qC_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_matches_qC() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_matches_qC_attach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_matches_qC() + + @llgs_test + def test_qThreadInfo_matches_qC_attach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_matches_qC() + + def p_returns_correct_data_size_for_each_qRegisterInfo(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. 
+ reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.assertTrue(len(reg_infos) > 0) + + byte_order = self.get_target_byte_order() + + # Read value for each register. + reg_index = 0 + for reg_info in reg_infos: + # Skip registers that don't have a register set. For x86, these are + # the DRx registers, which have no LLDB-kind register number and thus + # cannot be read via normal + # NativeRegisterContext::ReadRegister(reg_info,...) calls. + if not "set" in reg_info: + continue + + # Clear existing packet expectations. + self.reset_test_sequence() + + # Run the register query + self.test_sequence.add_log_lines( + ["read packet: $p{0:x}#00".format(reg_index), + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the response length. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + + if "dynamic_size_dwarf_expr_bytes" in reg_info: + self.updateRegInfoBitsize(reg_info, byte_order) + self.assertEqual(len(p_response), 2 * int(reg_info["bitsize"]) / 8) + + # Increment loop + reg_index += 1 + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @llgs_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver( + self): + self.init_debugserver_test() + 
self.build() + self.set_inferior_startup_attach() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @llgs_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + def Hg_switches_to_3_threads(self): + # Startup the inferior with three threads (main + 2 new ones). + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["thread:new", "thread:new"]) + + # Let the inferior process have a few moments to start up the thread + # when launched. (The launch scenario has no time to run, so threads + # won't be there yet.) + self.run_process_then_stop(run_seconds=1) + + # Wait at most x seconds for 3 threads to be present. + threads = self.wait_for_thread_count(3, timeout_seconds=5) + self.assertEqual(len(threads), 3) + + # verify we can $H to each thead, and $qC matches the thread we set. + for thread in threads: + # Change to each thread, verify current thread id. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $Hg{0:x}#00".format(thread), # Set current thread. + "send packet: $OK#00", + "read packet: $qC#00", + {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}}], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the thread id. 
+ self.assertIsNotNone(context.get("thread_id")) + self.assertEqual(int(context.get("thread_id"), 16), thread) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_Hg_switches_to_3_threads_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.Hg_switches_to_3_threads() + + @llgs_test + def test_Hg_switches_to_3_threads_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.Hg_switches_to_3_threads() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_Hg_switches_to_3_threads_attach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.Hg_switches_to_3_threads() + + @llgs_test + def test_Hg_switches_to_3_threads_attach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.Hg_switches_to_3_threads() + + def Hc_then_Csignal_signals_correct_thread(self, segfault_signo): + # NOTE only run this one in inferior-launched mode: we can't grab inferior stdout when running attached, + # and the test requires getting stdout from the exe. + + NUM_THREADS = 3 + + # Startup the inferior with three threads (main + NUM_THREADS-1 worker threads). + # inferior_args=["thread:print-ids"] + inferior_args = ["thread:segfault"] + for i in range(NUM_THREADS - 1): + # if i > 0: + # Give time between thread creation/segfaulting for the handler to work. + # inferior_args.append("sleep:1") + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + + # Launch/attach. (In our case, this should only ever be launched since + # we need inferior stdout/stderr). 
+ procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + self.test_sequence.add_log_lines(["read packet: $c#63"], True) + context = self.expect_gdbremote_sequence() + + # Let the inferior process have a few moments to start up the thread when launched. + # context = self.run_process_then_stop(run_seconds=1) + + # Wait at most x seconds for all threads to be present. + # threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5) + # self.assertEquals(len(threads), NUM_THREADS) + + signaled_tids = {} + print_thread_ids = {} + + # Switch to each thread, deliver a signal, and verify signal delivery + for i in range(NUM_THREADS - 1): + # Run until SIGSEGV comes in. + self.reset_test_sequence() + self.test_sequence.add_log_lines([{"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", + "capture": {1: "signo", + 2: "thread_id"}}], + True) + + context = self.expect_gdbremote_sequence(timeout_seconds=10) + self.assertIsNotNone(context) + signo = context.get("signo") + self.assertEqual(int(signo, 16), segfault_signo) + + # Ensure we haven't seen this tid yet. + thread_id = int(context.get("thread_id"), 16) + self.assertFalse(thread_id in signaled_tids) + signaled_tids[thread_id] = 1 + + # Send SIGUSR1 to the thread that signaled the SIGSEGV. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Set the continue thread. + # Set current thread. + "read packet: $Hc{0:x}#00".format(thread_id), + "send packet: $OK#00", + + # Continue sending the signal number to the continue thread. + # The commented out packet is a way to do this same operation without using + # a $Hc (but this test is testing $Hc, so we'll stick with the former). + "read packet: $C{0:x}#00".format(lldbutil.get_signal_number('SIGUSR1')), + # "read packet: $vCont;C{0:x}:{1:x};c#00".format(lldbutil.get_signal_number('SIGUSR1'), thread_id), + + # FIXME: Linux does not report the thread stop on the delivered signal (SIGUSR1 here). 
MacOSX debugserver does. + # But MacOSX debugserver isn't guaranteeing the thread the signal handler runs on, so currently its an XFAIL. + # Need to rectify behavior here. The linux behavior is more intuitive to me since we're essentially swapping out + # an about-to-be-delivered signal (for which we already sent a stop packet) to a different signal. + # {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, + # "read packet: $c#63", + {"type": "output_match", "regex": r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\nthread ([0-9a-fA-F]+): past SIGSEGV\r\n", "capture": {1: "print_thread_id", 2: "post_handle_thread_id"}}, + ], + True) + + # Run the sequence. + context = self.expect_gdbremote_sequence(timeout_seconds=10) + self.assertIsNotNone(context) + + # Ensure the stop signal is the signal we delivered. + # stop_signo = context.get("stop_signo") + # self.assertIsNotNone(stop_signo) + # self.assertEquals(int(stop_signo,16), lldbutil.get_signal_number('SIGUSR1')) + + # Ensure the stop thread is the thread to which we delivered the signal. + # stop_thread_id = context.get("stop_thread_id") + # self.assertIsNotNone(stop_thread_id) + # self.assertEquals(int(stop_thread_id,16), thread_id) + + # Ensure we haven't seen this thread id yet. The inferior's + # self-obtained thread ids are not guaranteed to match the stub + # tids (at least on MacOSX). + print_thread_id = context.get("print_thread_id") + self.assertIsNotNone(print_thread_id) + print_thread_id = int(print_thread_id, 16) + self.assertFalse(print_thread_id in print_thread_ids) + + # Now remember this print (i.e. inferior-reflected) thread id and + # ensure we don't hit it again. + print_thread_ids[print_thread_id] = 1 + + # Ensure post signal-handle thread id matches the thread that + # initially raised the SIGSEGV. 
+ post_handle_thread_id = context.get("post_handle_thread_id") + self.assertIsNotNone(post_handle_thread_id) + post_handle_thread_id = int(post_handle_thread_id, 16) + self.assertEqual(post_handle_thread_id, print_thread_id) + + @unittest2.expectedFailure() + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + # Darwin debugserver translates some signals like SIGSEGV into some gdb + # expectations about fixed signal numbers. + self.Hc_then_Csignal_signals_correct_thread(self.TARGET_EXC_BAD_ACCESS) + + @llgs_test + def test_Hc_then_Csignal_signals_correct_thread_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.Hc_then_Csignal_signals_correct_thread( + lldbutil.get_signal_number('SIGSEGV')) + + def m_packet_reads_memory(self): + # This is the memory we will write into the inferior and then ensure we + # can read back with $m. + MEMORY_CONTENTS = "Test contents 0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz" + + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "set-message:%s" % + MEMORY_CONTENTS, + "get-data-address-hex:g_message", + "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "message_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. 
+ {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the message address. + self.assertIsNotNone(context.get("message_address")) + message_address = int(context.get("message_address"), 16) + + # Grab contents from the inferior. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $m{0:x},{1:x}#00".format(message_address, len(MEMORY_CONTENTS)), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "read_contents"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure what we read from inferior memory is what we wrote. + self.assertIsNotNone(context.get("read_contents")) + read_contents = seven.unhexlify(context.get("read_contents")) + self.assertEqual(read_contents, MEMORY_CONTENTS) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_m_packet_reads_memory_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.m_packet_reads_memory() + + @llgs_test + def test_m_packet_reads_memory_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.m_packet_reads_memory() + + def qMemoryRegionInfo_is_supported(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior() + + # Ask if it supports $qMemoryRegionInfo. 
+ self.test_sequence.add_log_lines( + ["read packet: $qMemoryRegionInfo#00", + "send packet: $OK#00" + ], True) + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_is_supported_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_is_supported() + + @llgs_test + def test_qMemoryRegionInfo_is_supported_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_is_supported() + + def qMemoryRegionInfo_reports_code_address_as_executable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-code-address-hex:hello", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "code_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the code address. + self.assertIsNotNone(context.get("code_address")) + code_address = int(context.get("code_address"), 16) + + # Grab memory region info from the inferior. + self.reset_test_sequence() + self.add_query_memory_region_packets(code_address) + + # Run the packet stream. 
+ context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure code address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("x" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. + self.assert_address_within_memory_region(code_address, mem_region_dict) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_reports_code_address_as_executable_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_code_address_as_executable() + + @llgs_test + def test_qMemoryRegionInfo_reports_code_address_as_executable_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_code_address_as_executable() + + def qMemoryRegionInfo_reports_stack_address_as_readable_writeable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-stack-address-hex:", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"stack address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "stack_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. 
+ {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
+ True)
+
+ # Run the packet stream.
+ context = self.expect_gdbremote_sequence()
+ self.assertIsNotNone(context)
+
+ # Grab the address.
+ self.assertIsNotNone(context.get("stack_address"))
+ stack_address = int(context.get("stack_address"), 16)
+
+ # Grab memory region info from the inferior.
+ self.reset_test_sequence()
+ self.add_query_memory_region_packets(stack_address)
+
+ # Run the packet stream.
+ context = self.expect_gdbremote_sequence()
+ self.assertIsNotNone(context)
+ mem_region_dict = self.parse_memory_region_packet(context)
+
+ # Ensure there are no errors reported.
+ self.assertFalse("error" in mem_region_dict)
+
+ # Ensure address is readable and writeable.
+ self.assertTrue("permissions" in mem_region_dict)
+ self.assertTrue("r" in mem_region_dict["permissions"])
+ self.assertTrue("w" in mem_region_dict["permissions"])
+
+ # Ensure the start address and size encompass the address we queried.
+ self.assert_address_within_memory_region(
+ stack_address, mem_region_dict)
+
+ @debugserver_test
+ @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet
+ def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_debugserver(
+ self):
+ self.init_debugserver_test()
+ self.build()
+ self.set_inferior_startup_launch()
+ self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable()
+
+ @llgs_test
+ def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_llgs(
+ self):
+ self.init_llgs_test()
+ self.build()
+ self.set_inferior_startup_launch()
+ self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable()
+
+ def qMemoryRegionInfo_reports_heap_address_as_readable_writeable(self):
+ # Start up the inferior. 
+ procs = self.prep_debug_monitor_and_inferior(
+ inferior_args=["get-heap-address-hex:", "sleep:5"])
+
+ # Run the process
+ self.test_sequence.add_log_lines(
+ [
+ # Start running after initial stop.
+ "read packet: $c#63",
+ # Match output line that prints the memory address of the message buffer within the inferior.
+ # Note we require launch-only testing so we can get inferior output.
+ {"type": "output_match", "regex": self.maybe_strict_output_regex(r"heap address: 0x([0-9a-fA-F]+)\r\n"),
+ "capture": {1: "heap_address"}},
+ # Now stop the inferior.
+ "read packet: {}".format(chr(3)),
+ # And wait for the stop notification.
+ {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
+ True)
+
+ # Run the packet stream.
+ context = self.expect_gdbremote_sequence()
+ self.assertIsNotNone(context)
+
+ # Grab the address.
+ self.assertIsNotNone(context.get("heap_address"))
+ heap_address = int(context.get("heap_address"), 16)
+
+ # Grab memory region info from the inferior.
+ self.reset_test_sequence()
+ self.add_query_memory_region_packets(heap_address)
+
+ # Run the packet stream.
+ context = self.expect_gdbremote_sequence()
+ self.assertIsNotNone(context)
+ mem_region_dict = self.parse_memory_region_packet(context)
+
+ # Ensure there are no errors reported.
+ self.assertFalse("error" in mem_region_dict)
+
+ # Ensure address is readable and writeable.
+ self.assertTrue("permissions" in mem_region_dict)
+ self.assertTrue("r" in mem_region_dict["permissions"])
+ self.assertTrue("w" in mem_region_dict["permissions"])
+
+ # Ensure the start address and size encompass the address we queried. 
+ self.assert_address_within_memory_region(heap_address, mem_region_dict) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() + + @llgs_test + def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() + + def breakpoint_set_and_remove_work(self, want_hardware=False): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "get-code-address-hex:hello", + "sleep:1", + "call-function:hello"]) + + # Run the process + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + self.test_sequence.add_log_lines( + [ # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the function call entry point. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "function_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info - we need endian of target to handle register + # value conversions. 
+ process_info = self.parse_process_info_response(context)
+ endian = process_info.get("endian")
+ self.assertIsNotNone(endian)
+
+ # Gather register info entries.
+ reg_infos = self.parse_register_info_packets(context)
+ (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos)
+ self.assertIsNotNone(pc_lldb_reg_index)
+ self.assertIsNotNone(pc_reg_info)
+
+ # Grab the function address.
+ self.assertIsNotNone(context.get("function_address"))
+ function_address = int(context.get("function_address"), 16)
+
+ # Get current target architecture
+ target_arch = self.getArchitecture()
+
+ # Set the breakpoint.
+ if (target_arch == "arm") or (target_arch == "aarch64"):
+ # TODO: Handle case when setting breakpoint in thumb code
+ BREAKPOINT_KIND = 4
+ else:
+ BREAKPOINT_KIND = 1
+
+ # Set default packet type to Z0 (software breakpoint)
+ z_packet_type = 0
+
+ # If hardware breakpoint is requested set packet type to Z1
+ if want_hardware == True:
+ z_packet_type = 1
+
+ self.reset_test_sequence()
+ self.add_set_breakpoint_packets(
+ function_address,
+ z_packet_type,
+ do_continue=True,
+ breakpoint_kind=BREAKPOINT_KIND)
+
+ # Run the packet stream.
+ context = self.expect_gdbremote_sequence()
+ self.assertIsNotNone(context)
+
+ # Verify the stop signal reported was the breakpoint signal number.
+ stop_signo = context.get("stop_signo")
+ self.assertIsNotNone(stop_signo)
+ self.assertEqual(int(stop_signo, 16),
+ lldbutil.get_signal_number('SIGTRAP'))
+
+ # Ensure we did not receive any output. If the breakpoint was not set, we would
+ # see output (from a launched process with captured stdio) printing a hello, world message.
+ # That would indicate the breakpoint didn't take.
+ self.assertEqual(len(context["O_content"]), 0)
+
+ # Verify that the PC for the main thread is where we expect it - right at the breakpoint address.
+ # This acts as another validation on the register reading code. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Print the PC. This should match the breakpoint address. + "read packet: $p{0:x}#00".format(pc_lldb_reg_index), + # Capture $p results. + {"direction": "send", + "regex": r"^\$([0-9a-fA-F]+)#", + "capture": {1: "p_response"}}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the PC is where we expect. Note response is in endianness of + # the inferior. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + + # Convert from target endian to int. + returned_pc = lldbgdbserverutils.unpack_register_hex_unsigned( + endian, p_response) + self.assertEqual(returned_pc, function_address) + + # Verify that a breakpoint remove and continue gets us the expected + # output. + self.reset_test_sequence() + + # Add breakpoint remove packets + self.add_remove_breakpoint_packets( + function_address, + z_packet_type, + breakpoint_kind=BREAKPOINT_KIND) + + self.test_sequence.add_log_lines( + [ + # Continue running. + "read packet: $c#63", + # We should now receive the output from the call. + {"type": "output_match", "regex": r"^hello, world\r\n$"}, + # And wait for program completion. 
+ {"direction": "send", "regex": r"^\$W00(.*)#[0-9a-fA-F]{2}$"}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_software_breakpoint_set_and_remove_work_debugserver(self): + self.init_debugserver_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=False) + + @llgs_test + @expectedFlakeyLinux("llvm.org/pr25652") + def test_software_breakpoint_set_and_remove_work_llgs(self): + self.init_llgs_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=False) + + @debugserver_test + @skipUnlessPlatform(oslist=['linux']) + @expectedFailureAndroid + @skipIf(archs=no_match(['arm', 'aarch64'])) + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_hardware_breakpoint_set_and_remove_work_debugserver(self): + self.init_debugserver_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=True) + + @llgs_test + @skipUnlessPlatform(oslist=['linux']) + @expectedFailureAndroid + @skipIf(archs=no_match(['arm', 'aarch64'])) + def test_hardware_breakpoint_set_and_remove_work_llgs(self): + self.init_llgs_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + 
self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=True) + + def qSupported_returns_known_stub_features(self): + # Start up the stub and start/prep the inferior. + procs = self.prep_debug_monitor_and_inferior() + self.add_qSupported_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Retrieve the qSupported features. + supported_dict = self.parse_qSupported_response(context) + self.assertIsNotNone(supported_dict) + self.assertTrue(len(supported_dict) > 0) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_qSupported_returns_known_stub_features_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qSupported_returns_known_stub_features() + + @llgs_test + def test_qSupported_returns_known_stub_features_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qSupported_returns_known_stub_features() + + def written_M_content_reads_back_correctly(self): + TEST_MESSAGE = "Hello, memory" + + # Start up the stub and start/prep the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "set-message:xxxxxxxxxxxxxX", + "get-data-address-hex:g_message", + "sleep:1", + "print-message:"]) + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "message_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. 
+ {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the message address. + self.assertIsNotNone(context.get("message_address")) + message_address = int(context.get("message_address"), 16) + + # Hex-encode the test message, adding null termination. + hex_encoded_message = seven.hexlify(TEST_MESSAGE) + + # Write the message to the inferior. Verify that we can read it with the hex-encoded (m) + # and binary (x) memory read packets. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $M{0:x},{1:x}:{2}#00".format(message_address, len(TEST_MESSAGE), hex_encoded_message), + "send packet: $OK#00", + "read packet: $m{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)), + "send packet: ${0}#00".format(hex_encoded_message), + "read packet: $x{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)), + "send packet: ${0}#00".format(TEST_MESSAGE), + "read packet: $m{0:x},4#00".format(message_address), + "send packet: ${0}#00".format(hex_encoded_message[0:8]), + "read packet: $x{0:x},4#00".format(message_address), + "send packet: ${0}#00".format(TEST_MESSAGE[0:4]), + "read packet: $c#63", + {"type": "output_match", "regex": r"^message: (.+)\r\n$", "capture": {1: "printed_message"}}, + "send packet: $W00#00", + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure what we read from inferior memory is what we wrote. 
+ printed_message = context.get("printed_message")
+ self.assertIsNotNone(printed_message)
+ self.assertEqual(printed_message, TEST_MESSAGE + "X")
+
+ @debugserver_test
+ @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet
+ def test_written_M_content_reads_back_correctly_debugserver(self):
+ self.init_debugserver_test()
+ self.build()
+ self.set_inferior_startup_launch()
+ self.written_M_content_reads_back_correctly()
+
+ @llgs_test
+ @expectedFlakeyLinux("llvm.org/pr25652")
+ def test_written_M_content_reads_back_correctly_llgs(self):
+ self.init_llgs_test()
+ self.build()
+ self.set_inferior_startup_launch()
+ self.written_M_content_reads_back_correctly()
+
+ def P_writes_all_gpr_registers(self):
+ # Start inferior debug session, grab all register info.
+ procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"])
+ self.add_register_info_collection_packets()
+ self.add_process_info_collection_packets()
+
+ context = self.expect_gdbremote_sequence()
+ self.assertIsNotNone(context)
+
+ # Process register infos.
+ reg_infos = self.parse_register_info_packets(context)
+ self.assertIsNotNone(reg_infos)
+ self.add_lldb_register_index(reg_infos)
+
+ # Process endian.
+ process_info = self.parse_process_info_response(context)
+ endian = process_info.get("endian")
+ self.assertIsNotNone(endian)
+
+ # Pull out the register infos that we think we can bit flip
+ # successfully.
+ gpr_reg_infos = [
+ reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)]
+ self.assertTrue(len(gpr_reg_infos) > 0)
+
+ # Write flipped bit pattern of existing value to each register. 
+ (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value( + gpr_reg_infos, endian) + # print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes)) + self.assertTrue(successful_writes > 0) + + # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp). + # Come back to this. I have the test rigged to verify that at least some + # of the bit-flip writes work. + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_P_writes_all_gpr_registers_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.P_writes_all_gpr_registers() + + @llgs_test + def test_P_writes_all_gpr_registers_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.P_writes_all_gpr_registers() + + def P_and_p_thread_suffix_work(self): + # Startup the inferior with three threads. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["thread:new", "thread:new"]) + self.add_thread_suffix_request_packets() + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + reg_index = self.select_modifiable_register(reg_infos) + self.assertIsNotNone(reg_index) + reg_byte_size = int(reg_infos[reg_index]["bitsize"]) // 8 + self.assertTrue(reg_byte_size > 0) + + # Run the process a bit so threads can start up, and collect register + # info. 
+ context = self.run_process_then_stop(run_seconds=1) + self.assertIsNotNone(context) + + # Wait for 3 threads to be present. + threads = self.wait_for_thread_count(3, timeout_seconds=5) + self.assertEqual(len(threads), 3) + + expected_reg_values = [] + register_increment = 1 + next_value = None + + # Set the same register in each of 3 threads to a different value. + # Verify each one has the unique value. + for thread in threads: + # If we don't have a next value yet, start it with the initial read + # value + 1 + if not next_value: + # Read pre-existing register value. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Set the next value to use for writing as the increment plus + # current value. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + next_value = lldbgdbserverutils.unpack_register_hex_unsigned( + endian, p_response) + + # Set new value using P and thread suffix. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $P{0:x}={1};thread:{2:x}#00".format( + reg_index, + lldbgdbserverutils.pack_register_hex( + endian, + next_value, + byte_size=reg_byte_size), + thread), + "send packet: $OK#00", + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Save the value we set. + expected_reg_values.append(next_value) + + # Increment value for next thread to use (we want them all + # different so we can verify they wrote to each thread correctly + # next.) + next_value += register_increment + + # Revisit each thread and verify they have the expected value set for + # the register we wrote. + thread_index = 0 + for thread in threads: + # Read pre-existing register value. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Get the register value. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + read_value = lldbgdbserverutils.unpack_register_hex_unsigned( + endian, p_response) + + # Make sure we read back what we wrote. + self.assertEqual(read_value, expected_reg_values[thread_index]) + thread_index += 1 + + # Note: as of this moment, a hefty number of the GPR writes are failing + # with E32 (everything except rax-rdx, rdi, rsi, rbp). + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_P_and_p_thread_suffix_work_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.P_and_p_thread_suffix_work() + + @llgs_test + def test_P_and_p_thread_suffix_work_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.P_and_p_thread_suffix_work() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubReverseConnect.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubReverseConnect.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubReverseConnect.py @@ -0,0 +1,98 @@ +from __future__ import print_function + +import gdbremote_testcase +import lldbgdbserverutils +import re +import select +import socket +import time +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestStubReverseConnect(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + 
+ _DEFAULT_TIMEOUT = 20 + + def setUp(self): + # Set up the test. + gdbremote_testcase.GdbRemoteTestCaseBase.setUp(self) + + # Create a listener on a local port. + self.listener_socket = self.create_listener_socket() + self.assertIsNotNone(self.listener_socket) + self.listener_port = self.listener_socket.getsockname()[1] + + def create_listener_socket(self, timeout_seconds=_DEFAULT_TIMEOUT): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.assertIsNotNone(sock) + + sock.settimeout(timeout_seconds) + sock.bind(("127.0.0.1", 0)) + sock.listen(1) + + def tear_down_listener(): + try: + sock.shutdown(socket.SHUT_RDWR) + except: + # ignore + None + + self.addTearDownHook(tear_down_listener) + return sock + + def reverse_connect_works(self): + # Indicate stub startup should do a reverse connect. + appended_stub_args = ["--reverse-connect"] + if self.debug_monitor_extra_args: + self.debug_monitor_extra_args += appended_stub_args + else: + self.debug_monitor_extra_args = appended_stub_args + + self.stub_hostname = "127.0.0.1" + self.port = self.listener_port + + triple = self.dbg.GetSelectedPlatform().GetTriple() + if re.match(".*-.*-.*-android", triple): + self.forward_adb_port( + self.port, + self.port, + "reverse", + self.stub_device) + + # Start the stub. + server = self.launch_debug_monitor(logfile=sys.stdout) + self.assertIsNotNone(server) + self.assertTrue( + lldbgdbserverutils.process_is_running( + server.pid, True)) + + # Listen for the stub's connection to us. + (stub_socket, address) = self.listener_socket.accept() + self.assertIsNotNone(stub_socket) + self.assertIsNotNone(address) + print("connected to stub {} on {}".format( + address, stub_socket.getsockname())) + + # Verify we can do the handshake. If that works, we'll call it good. + self.do_handshake(stub_socket, timeout_seconds=self._DEFAULT_TIMEOUT) + + # Clean up. 
+ stub_socket.shutdown(socket.SHUT_RDWR) + + @debugserver_test + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def test_reverse_connect_works_debugserver(self): + self.init_debugserver_test(use_named_pipe=False) + self.set_inferior_startup_launch() + self.reverse_connect_works() + + @llgs_test + @skipIfRemote # reverse connect is not a supported use case for now + def test_reverse_connect_works_llgs(self): + self.init_llgs_test(use_named_pipe=False) + self.set_inferior_startup_launch() + self.reverse_connect_works() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubSetSID.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubSetSID.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubSetSID.py @@ -0,0 +1,86 @@ +from __future__ import print_function + + +import gdbremote_testcase +import lldbgdbserverutils +import os +import select +import tempfile +import time +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestStubSetSIDTestCase(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def get_stub_sid(self, extra_stub_args=None): + # Launch debugserver + if extra_stub_args: + self.debug_monitor_extra_args += extra_stub_args + + server = self.launch_debug_monitor() + self.assertIsNotNone(server) + self.assertTrue( + lldbgdbserverutils.process_is_running( + server.pid, True)) + + # Get the process id for the stub. 
+ return os.getsid(server.pid) + + def sid_is_same_without_setsid(self): + stub_sid = self.get_stub_sid() + self.assertEqual(stub_sid, os.getsid(0)) + + def sid_is_different_with_setsid(self): + stub_sid = self.get_stub_sid(["--setsid"]) + self.assertNotEqual(stub_sid, os.getsid(0)) + + def sid_is_different_with_S(self): + stub_sid = self.get_stub_sid(["-S"]) + self.assertNotEqual(stub_sid, os.getsid(0)) + + @debugserver_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_same_without_setsid_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_same_without_setsid() + + @llgs_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + @expectedFailureAll(oslist=['freebsd']) + def test_sid_is_same_without_setsid_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_same_without_setsid() + + @debugserver_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_setsid_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_setsid() + + @llgs_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_setsid_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_setsid() + + @debugserver_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_S_debugserver(self): + 
self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_S() + + @llgs_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_S_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_S() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py @@ -0,0 +1,1643 @@ +""" +Base class for gdb-remote test cases. +""" + +from __future__ import division, print_function + + +import errno +import os +import os.path +import platform +import random +import re +import select +import signal +import socket +import subprocess +import sys +import tempfile +import time +from lldbsuite.test import configuration +from lldbsuite.test.lldbtest import * +from lldbsuite.support import seven +from lldbgdbserverutils import * +import logging + + +class _ConnectionRefused(IOError): + pass + + +class GdbRemoteTestCaseBase(TestBase): + + NO_DEBUG_INFO_TESTCASE = True + + _TIMEOUT_SECONDS = 120 + + _GDBREMOTE_KILL_PACKET = "$k#6b" + + # Start the inferior separately, attach to the inferior on the stub + # command line. + _STARTUP_ATTACH = "attach" + # Start the inferior separately, start the stub without attaching, allow + # the test to attach to the inferior however it wants (e.g. $vAttach;pid). + _STARTUP_ATTACH_MANUALLY = "attach_manually" + # Start the stub, and launch the inferior with an $A packet via the + # initial packet stream. 
+ _STARTUP_LAUNCH = "launch" + + # GDB Signal numbers that are not target-specific used for common + # exceptions + TARGET_EXC_BAD_ACCESS = 0x91 + TARGET_EXC_BAD_INSTRUCTION = 0x92 + TARGET_EXC_ARITHMETIC = 0x93 + TARGET_EXC_EMULATION = 0x94 + TARGET_EXC_SOFTWARE = 0x95 + TARGET_EXC_BREAKPOINT = 0x96 + + _verbose_log_handler = None + _log_formatter = logging.Formatter( + fmt='%(asctime)-15s %(levelname)-8s %(message)s') + + def setUpBaseLogging(self): + self.logger = logging.getLogger(__name__) + + if len(self.logger.handlers) > 0: + return # We have set up this handler already + + self.logger.propagate = False + self.logger.setLevel(logging.DEBUG) + + # log all warnings to stderr + handler = logging.StreamHandler() + handler.setLevel(logging.WARNING) + handler.setFormatter(self._log_formatter) + self.logger.addHandler(handler) + + def isVerboseLoggingRequested(self): + # We will report our detailed logs if the user requested that the "gdb-remote" channel is + # logged. + return any(("gdb-remote" in channel) + for channel in lldbtest_config.channels) + + def setUp(self): + TestBase.setUp(self) + + self.setUpBaseLogging() + self.debug_monitor_extra_args = [] + self._pump_queues = socket_packet_pump.PumpQueues() + + if self.isVerboseLoggingRequested(): + # If requested, full logs go to a log file + self._verbose_log_handler = logging.FileHandler( + self.log_basename + "-host.log") + self._verbose_log_handler.setFormatter(self._log_formatter) + self._verbose_log_handler.setLevel(logging.DEBUG) + self.logger.addHandler(self._verbose_log_handler) + + self.test_sequence = GdbRemoteTestSequence(self.logger) + self.set_inferior_startup_launch() + self.port = self.get_next_port() + self.named_pipe_path = None + self.named_pipe = None + self.named_pipe_fd = None + self.stub_sends_two_stop_notifications_on_kill = False + if configuration.lldb_platform_url: + if configuration.lldb_platform_url.startswith('unix-'): + url_pattern = '(.+)://\[?(.+?)\]?/.*' + else: + url_pattern = 
'(.+)://(.+):\d+' + scheme, host = re.match( + url_pattern, configuration.lldb_platform_url).groups() + if configuration.lldb_platform_name == 'remote-android' and host != 'localhost': + self.stub_device = host + self.stub_hostname = 'localhost' + else: + self.stub_device = None + self.stub_hostname = host + else: + self.stub_hostname = "localhost" + + def tearDown(self): + self._pump_queues.verify_queues_empty() + + self.logger.removeHandler(self._verbose_log_handler) + self._verbose_log_handler = None + TestBase.tearDown(self) + + def getLocalServerLogFile(self): + return self.log_basename + "-server.log" + + def setUpServerLogging(self, is_llgs): + if len(lldbtest_config.channels) == 0: + return # No logging requested + + if lldb.remote_platform: + log_file = lldbutil.join_remote_paths( + lldb.remote_platform.GetWorkingDirectory(), "server.log") + else: + log_file = self.getLocalServerLogFile() + + if is_llgs: + self.debug_monitor_extra_args.append("--log-file=" + log_file) + self.debug_monitor_extra_args.append( + "--log-channels={}".format(":".join(lldbtest_config.channels))) + else: + self.debug_monitor_extra_args = [ + "--log-file=" + log_file, "--log-flags=0x800000"] + + def get_next_port(self): + return 12000 + random.randint(0, 3999) + + def reset_test_sequence(self): + self.test_sequence = GdbRemoteTestSequence(self.logger) + + def create_named_pipe(self): + # Create a temp dir and name for a pipe. + temp_dir = tempfile.mkdtemp() + named_pipe_path = os.path.join(temp_dir, "stub_port_number") + + # Create the named pipe. + os.mkfifo(named_pipe_path) + + # Open the read side of the pipe in non-blocking mode. This will + # return right away, ready or not. + named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK) + + # Create the file for the named pipe. Note this will follow semantics of + # a non-blocking read side of a named pipe, which has different semantics + # than a named pipe opened for read in non-blocking mode. 
+ named_pipe = os.fdopen(named_pipe_fd, "r") + self.assertIsNotNone(named_pipe) + + def shutdown_named_pipe(): + # Close the pipe. + try: + named_pipe.close() + except: + print("failed to close named pipe") + None + + # Delete the pipe. + try: + os.remove(named_pipe_path) + except: + print("failed to delete named pipe: {}".format(named_pipe_path)) + None + + # Delete the temp directory. + try: + os.rmdir(temp_dir) + except: + print( + "failed to delete temp dir: {}, directory contents: '{}'".format( + temp_dir, os.listdir(temp_dir))) + None + + # Add the shutdown hook to clean up the named pipe. + self.addTearDownHook(shutdown_named_pipe) + + # Clear the port so the stub selects a port number. + self.port = 0 + + return (named_pipe_path, named_pipe, named_pipe_fd) + + def get_stub_port_from_named_socket(self, read_timeout_seconds=5): + # Wait for something to read with a max timeout. + (ready_readers, _, _) = select.select( + [self.named_pipe_fd], [], [], read_timeout_seconds) + self.assertIsNotNone( + ready_readers, + "write side of pipe has not written anything - stub isn't writing to pipe.") + self.assertNotEqual( + len(ready_readers), + 0, + "write side of pipe has not written anything - stub isn't writing to pipe.") + + # Read the port from the named pipe. + stub_port_raw = self.named_pipe.read() + self.assertIsNotNone(stub_port_raw) + self.assertNotEqual( + len(stub_port_raw), + 0, + "no content to read on pipe") + + # Trim null byte, convert to int. 
+ stub_port_raw = stub_port_raw[:-1] + stub_port = int(stub_port_raw) + self.assertTrue(stub_port > 0) + + return stub_port + + def init_llgs_test(self, use_named_pipe=True): + if lldb.remote_platform: + # Remote platforms don't support named pipe based port negotiation + use_named_pipe = False + + # Grab the ppid from /proc/[shell pid]/stat + err, retcode, shell_stat = self.run_platform_command( + "cat /proc/$$/stat") + self.assertTrue( + err.Success() and retcode == 0, + "Failed to read file /proc/$$/stat: %s, retcode: %d" % + (err.GetCString(), + retcode)) + + # [pid] ([executable]) [state] [*ppid*] + pid = re.match(r"^\d+ \(.+\) . (\d+)", shell_stat).group(1) + err, retcode, ls_output = self.run_platform_command( + "ls -l /proc/%s/exe" % pid) + self.assertTrue( + err.Success() and retcode == 0, + "Failed to read file /proc/%s/exe: %s, retcode: %d" % + (pid, + err.GetCString(), + retcode)) + exe = ls_output.split()[-1] + + # If the binary has been deleted, the link name has " (deleted)" appended. + # Remove if it's there. + self.debug_monitor_exe = re.sub(r' \(deleted\)$', '', exe) + else: + self.debug_monitor_exe = get_lldb_server_exe() + if not self.debug_monitor_exe: + self.skipTest("lldb-server exe not found") + + self.debug_monitor_extra_args = ["gdbserver"] + self.setUpServerLogging(is_llgs=True) + + if use_named_pipe: + (self.named_pipe_path, self.named_pipe, + self.named_pipe_fd) = self.create_named_pipe() + + def init_debugserver_test(self, use_named_pipe=True): + self.debug_monitor_exe = get_debugserver_exe() + if not self.debug_monitor_exe: + self.skipTest("debugserver exe not found") + self.setUpServerLogging(is_llgs=False) + if use_named_pipe: + (self.named_pipe_path, self.named_pipe, + self.named_pipe_fd) = self.create_named_pipe() + # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification + # when the process truly dies. 
+ self.stub_sends_two_stop_notifications_on_kill = True + + def forward_adb_port(self, source, target, direction, device): + adb = ['adb'] + (['-s', device] if device else []) + [direction] + + def remove_port_forward(): + subprocess.call(adb + ["--remove", "tcp:%d" % source]) + + subprocess.call(adb + ["tcp:%d" % source, "tcp:%d" % target]) + self.addTearDownHook(remove_port_forward) + + def _verify_socket(self, sock): + # Normally, when the remote stub is not ready, we will get ECONNREFUSED during the + # connect() attempt. However, due to the way how ADB forwarding works, on android targets + # the connect() will always be successful, but the connection will be immediately dropped + # if ADB could not connect on the remote side. This function tries to detect this + # situation, and report it as "connection refused" so that the upper layers attempt the + # connection again. + triple = self.dbg.GetSelectedPlatform().GetTriple() + if not re.match(".*-.*-.*-android", triple): + return # Not android. + can_read, _, _ = select.select([sock], [], [], 0.1) + if sock not in can_read: + return # Data is not available, but the connection is alive. + if len(sock.recv(1, socket.MSG_PEEK)) == 0: + raise _ConnectionRefused() # Got EOF, connection dropped. 
+ + def create_socket(self): + sock = socket.socket() + logger = self.logger + + triple = self.dbg.GetSelectedPlatform().GetTriple() + if re.match(".*-.*-.*-android", triple): + self.forward_adb_port( + self.port, + self.port, + "forward", + self.stub_device) + + logger.info( + "Connecting to debug monitor on %s:%d", + self.stub_hostname, + self.port) + connect_info = (self.stub_hostname, self.port) + try: + sock.connect(connect_info) + except socket.error as serr: + if serr.errno == errno.ECONNREFUSED: + raise _ConnectionRefused() + raise serr + + def shutdown_socket(): + if sock: + try: + # send the kill packet so lldb-server shuts down gracefully + sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET) + except: + logger.warning( + "failed to send kill packet to debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + + try: + sock.close() + except: + logger.warning( + "failed to close socket to debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + + self.addTearDownHook(shutdown_socket) + + self._verify_socket(sock) + + return sock + + def set_inferior_startup_launch(self): + self._inferior_startup = self._STARTUP_LAUNCH + + def set_inferior_startup_attach(self): + self._inferior_startup = self._STARTUP_ATTACH + + def set_inferior_startup_attach_manually(self): + self._inferior_startup = self._STARTUP_ATTACH_MANUALLY + + def get_debug_monitor_command_line_args(self, attach_pid=None): + if lldb.remote_platform: + commandline_args = self.debug_monitor_extra_args + \ + ["*:{}".format(self.port)] + else: + commandline_args = self.debug_monitor_extra_args + \ + ["127.0.0.1:{}".format(self.port)] + + if attach_pid: + commandline_args += ["--attach=%d" % attach_pid] + if self.named_pipe_path: + commandline_args += ["--named-pipe", self.named_pipe_path] + return commandline_args + + def get_target_byte_order(self): + inferior_exe_path = self.getBuildArtifact("a.out") + target = self.dbg.CreateTarget(inferior_exe_path) + return target.GetByteOrder() + + 
def launch_debug_monitor(self, attach_pid=None, logfile=None): + # Create the command line. + commandline_args = self.get_debug_monitor_command_line_args( + attach_pid=attach_pid) + + # Start the server. + server = self.spawnSubprocess( + self.debug_monitor_exe, + commandline_args, + install_remote=False) + self.addTearDownHook(self.cleanupSubprocesses) + self.assertIsNotNone(server) + + # If we're receiving the stub's listening port from the named pipe, do + # that here. + if self.named_pipe: + self.port = self.get_stub_port_from_named_socket() + + return server + + def connect_to_debug_monitor(self, attach_pid=None): + if self.named_pipe: + # Create the stub. + server = self.launch_debug_monitor(attach_pid=attach_pid) + self.assertIsNotNone(server) + + def shutdown_debug_monitor(): + try: + server.terminate() + except: + logger.warning( + "failed to terminate server for debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + self.addTearDownHook(shutdown_debug_monitor) + + # Schedule debug monitor to be shut down during teardown. + logger = self.logger + + # Attach to the stub and return a socket opened to it. + self.sock = self.create_socket() + return server + + # We're using a random port algorithm to try not to collide with other ports, + # and retry a max # times. + attempts = 0 + MAX_ATTEMPTS = 20 + + while attempts < MAX_ATTEMPTS: + server = self.launch_debug_monitor(attach_pid=attach_pid) + + # Schedule debug monitor to be shut down during teardown. 
+ logger = self.logger + + def shutdown_debug_monitor(): + try: + server.terminate() + except: + logger.warning( + "failed to terminate server for debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + self.addTearDownHook(shutdown_debug_monitor) + + connect_attemps = 0 + MAX_CONNECT_ATTEMPTS = 10 + + while connect_attemps < MAX_CONNECT_ATTEMPTS: + # Create a socket to talk to the server + try: + logger.info("Connect attempt %d", connect_attemps + 1) + self.sock = self.create_socket() + return server + except _ConnectionRefused as serr: + # Ignore, and try again. + pass + time.sleep(0.5) + connect_attemps += 1 + + # We should close the server here to be safe. + server.terminate() + + # Increment attempts. + print( + "connect to debug monitor on port %d failed, attempt #%d of %d" % + (self.port, attempts + 1, MAX_ATTEMPTS)) + attempts += 1 + + # And wait a random length of time before next attempt, to avoid + # collisions. + time.sleep(random.randint(1, 5)) + + # Now grab a new port number. + self.port = self.get_next_port() + + raise Exception( + "failed to create a socket to the launched debug monitor after %d tries" % + attempts) + + def launch_process_for_attach( + self, + inferior_args=None, + sleep_seconds=3, + exe_path=None): + # We're going to start a child process that the debug monitor stub can later attach to. + # This process needs to be started so that it just hangs around for a while. We'll + # have it sleep. 
+ if not exe_path: + exe_path = self.getBuildArtifact("a.out") + + args = [] + if inferior_args: + args.extend(inferior_args) + if sleep_seconds: + args.append("sleep:%d" % sleep_seconds) + + inferior = self.spawnSubprocess(exe_path, args) + + def shutdown_process_for_attach(): + try: + inferior.terminate() + except: + logger.warning( + "failed to terminate inferior process for attach: {}; ignoring".format( + sys.exc_info()[0])) + self.addTearDownHook(shutdown_process_for_attach) + return inferior + + def prep_debug_monitor_and_inferior( + self, + inferior_args=None, + inferior_sleep_seconds=3, + inferior_exe_path=None): + """Prep the debug monitor, the inferior, and the expected packet stream. + + Handle the separate cases of using the debug monitor in attach-to-inferior mode + and in launch-inferior mode. + + For attach-to-inferior mode, the inferior process is first started, then + the debug monitor is started in attach to pid mode (using --attach on the + stub command line), and the no-ack-mode setup is appended to the packet + stream. The packet stream is not yet executed, ready to have more expected + packet entries added to it. + + For launch-inferior mode, the stub is first started, then no ack mode is + setup on the expected packet stream, then the verified launch packets are added + to the expected socket stream. The packet stream is not yet executed, ready + to have more expected packet entries added to it. + + The return value is: + {inferior:, server:} + """ + inferior = None + attach_pid = None + + if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY: + # Launch the process that we'll use as the inferior. 
+ inferior = self.launch_process_for_attach( + inferior_args=inferior_args, + sleep_seconds=inferior_sleep_seconds, + exe_path=inferior_exe_path) + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + if self._inferior_startup == self._STARTUP_ATTACH: + # In this case, we want the stub to attach via the command + # line, so set the command line attach pid here. + attach_pid = inferior.pid + + if self._inferior_startup == self._STARTUP_LAUNCH: + # Build launch args + if not inferior_exe_path: + inferior_exe_path = self.getBuildArtifact("a.out") + + if lldb.remote_platform: + remote_path = lldbutil.append_to_process_working_directory(self, + os.path.basename(inferior_exe_path)) + remote_file_spec = lldb.SBFileSpec(remote_path, False) + err = lldb.remote_platform.Install(lldb.SBFileSpec( + inferior_exe_path, True), remote_file_spec) + if err.Fail(): + raise Exception( + "remote_platform.Install('%s', '%s') failed: %s" % + (inferior_exe_path, remote_path, err)) + inferior_exe_path = remote_path + + launch_args = [inferior_exe_path] + if inferior_args: + launch_args.extend(inferior_args) + + # Launch the debug monitor stub, attaching to the inferior. 
+ server = self.connect_to_debug_monitor(attach_pid=attach_pid) + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + if self._inferior_startup == self._STARTUP_LAUNCH: + self.add_verified_launch_packets(launch_args) + + return {"inferior": inferior, "server": server} + + def expect_socket_recv( + self, + sock, + expected_content_regex, + timeout_seconds): + response = "" + timeout_time = time.time() + timeout_seconds + + while not expected_content_regex.match( + response) and time.time() < timeout_time: + can_read, _, _ = select.select([sock], [], [], timeout_seconds) + if can_read and sock in can_read: + recv_bytes = sock.recv(4096) + if recv_bytes: + response += seven.bitcast_to_string(recv_bytes) + + self.assertTrue(expected_content_regex.match(response)) + + def expect_socket_send(self, sock, content, timeout_seconds): + request_bytes_remaining = content + timeout_time = time.time() + timeout_seconds + + while len(request_bytes_remaining) > 0 and time.time() < timeout_time: + _, can_write, _ = select.select([], [sock], [], timeout_seconds) + if can_write and sock in can_write: + written_byte_count = sock.send(request_bytes_remaining.encode()) + request_bytes_remaining = request_bytes_remaining[ + written_byte_count:] + self.assertEqual(len(request_bytes_remaining), 0) + + def do_handshake(self, stub_socket, timeout_seconds=5): + # Write the ack. + self.expect_socket_send(stub_socket, "+", timeout_seconds) + + # Send the start no ack mode packet. + NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0" + bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST.encode()) + self.assertEqual(bytes_sent, len(NO_ACK_MODE_REQUEST)) + + # Receive the ack and "OK" + self.expect_socket_recv(stub_socket, re.compile( + r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds) + + # Send the final ack. 
+ self.expect_socket_send(stub_socket, "+", timeout_seconds) + + def add_no_ack_remote_stream(self): + self.test_sequence.add_log_lines( + ["read packet: +", + "read packet: $QStartNoAckMode#b0", + "send packet: +", + "send packet: $OK#9a", + "read packet: +"], + True) + + def add_verified_launch_packets(self, launch_args): + self.test_sequence.add_log_lines( + ["read packet: %s" % build_gdbremote_A_packet(launch_args), + "send packet: $OK#00", + "read packet: $qLaunchSuccess#a5", + "send packet: $OK#00"], + True) + + def add_thread_suffix_request_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $QThreadSuffixSupported#e4", + "send packet: $OK#00", + ], True) + + def add_process_info_collection_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qProcessInfo#dc", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "process_info_raw"}}], + True) + + _KNOWN_PROCESS_INFO_KEYS = [ + "pid", + "parent-pid", + "real-uid", + "real-gid", + "effective-uid", + "effective-gid", + "cputype", + "cpusubtype", + "ostype", + "triple", + "vendor", + "endian", + "elf_abi", + "ptrsize" + ] + + def parse_process_info_response(self, context): + # Ensure we have a process info response. + self.assertIsNotNone(context) + process_info_raw = context.get("process_info_raw") + self.assertIsNotNone(process_info_raw) + + # Pull out key:value; pairs. + process_info_dict = { + match.group(1): match.group(2) for match in re.finditer( + r"([^:]+):([^;]+);", process_info_raw)} + + # Validate keys are known. 
+ for (key, val) in list(process_info_dict.items()): + self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS) + self.assertIsNotNone(val) + + return process_info_dict + + def add_register_info_collection_packets(self): + self.test_sequence.add_log_lines( + [{"type": "multi_response", "query": "qRegisterInfo", "append_iteration_suffix": True, + "end_regex": re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"), + "save_key": "reg_info_responses"}], + True) + + def parse_register_info_packets(self, context): + """Return an array of register info dictionaries, one per register info.""" + reg_info_responses = context.get("reg_info_responses") + self.assertIsNotNone(reg_info_responses) + + # Parse register infos. + return [parse_reg_info_response(reg_info_response) + for reg_info_response in reg_info_responses] + + def expect_gdbremote_sequence(self, timeout_seconds=None): + if not timeout_seconds: + timeout_seconds = self._TIMEOUT_SECONDS + return expect_lldb_gdbserver_replay( + self, + self.sock, + self.test_sequence, + self._pump_queues, + timeout_seconds, + self.logger) + + _KNOWN_REGINFO_KEYS = [ + "name", + "alt-name", + "bitsize", + "offset", + "encoding", + "format", + "set", + "gcc", + "ehframe", + "dwarf", + "generic", + "container-regs", + "invalidate-regs", + "dynamic_size_dwarf_expr_bytes", + "dynamic_size_dwarf_len" + ] + + def assert_valid_reg_info(self, reg_info): + # Assert we know about all the reginfo keys parsed. + for key in reg_info: + self.assertTrue(key in self._KNOWN_REGINFO_KEYS) + + # Check the bare-minimum expected set of register info keys. 
+ self.assertTrue("name" in reg_info) + self.assertTrue("bitsize" in reg_info) + self.assertTrue("offset" in reg_info) + self.assertTrue("encoding" in reg_info) + self.assertTrue("format" in reg_info) + + def find_pc_reg_info(self, reg_infos): + lldb_reg_index = 0 + for reg_info in reg_infos: + if ("generic" in reg_info) and (reg_info["generic"] == "pc"): + return (lldb_reg_index, reg_info) + lldb_reg_index += 1 + + return (None, None) + + def add_lldb_register_index(self, reg_infos): + """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry. + + We'll use this when we want to call packets like P/p with a register index but do so + on only a subset of the full register info set. + """ + self.assertIsNotNone(reg_infos) + + reg_index = 0 + for reg_info in reg_infos: + reg_info["lldb_register_index"] = reg_index + reg_index += 1 + + def add_query_memory_region_packets(self, address): + self.test_sequence.add_log_lines( + ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "memory_region_response"}}], + True) + + def parse_key_val_dict(self, key_val_text, allow_dupes=True): + self.assertIsNotNone(key_val_text) + kv_dict = {} + for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text): + key = match.group(1) + val = match.group(2) + if key in kv_dict: + if allow_dupes: + if isinstance(kv_dict[key], list): + kv_dict[key].append(val) + else: + # Promote to list + kv_dict[key] = [kv_dict[key], val] + else: + self.fail( + "key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format( + key, val, key_val_text, kv_dict)) + else: + kv_dict[key] = val + return kv_dict + + def parse_memory_region_packet(self, context): + # Ensure we have a context. + self.assertIsNotNone(context.get("memory_region_response")) + + # Pull out key:value; pairs. 
+ mem_region_dict = self.parse_key_val_dict( + context.get("memory_region_response")) + + # Validate keys are known. + for (key, val) in list(mem_region_dict.items()): + self.assertTrue( + key in [ + "start", + "size", + "permissions", + "name", + "error"]) + self.assertIsNotNone(val) + + # Return the dictionary of key-value pairs for the memory region. + return mem_region_dict + + def assert_address_within_memory_region( + self, test_address, mem_region_dict): + self.assertIsNotNone(mem_region_dict) + self.assertTrue("start" in mem_region_dict) + self.assertTrue("size" in mem_region_dict) + + range_start = int(mem_region_dict["start"], 16) + range_size = int(mem_region_dict["size"], 16) + range_end = range_start + range_size + + if test_address < range_start: + self.fail( + "address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format( + test_address, + range_start, + range_end, + range_size)) + elif test_address >= range_end: + self.fail( + "address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format( + test_address, + range_start, + range_end, + range_size)) + + def add_threadinfo_collection_packets(self): + self.test_sequence.add_log_lines( + [{"type": "multi_response", "first_query": "qfThreadInfo", "next_query": "qsThreadInfo", + "append_iteration_suffix": False, "end_regex": re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"), + "save_key": "threadinfo_responses"}], + True) + + def parse_threadinfo_packets(self, context): + """Return an array of thread ids (decimal ints), one per thread.""" + threadinfo_responses = context.get("threadinfo_responses") + self.assertIsNotNone(threadinfo_responses) + + thread_ids = [] + for threadinfo_response in threadinfo_responses: + new_thread_infos = parse_threadinfo_response(threadinfo_response) + thread_ids.extend(new_thread_infos) + return thread_ids + + def wait_for_thread_count(self, thread_count, timeout_seconds=3): + start_time = time.time() + timeout_time = start_time + timeout_seconds + + 
actual_thread_count = 0 + while actual_thread_count < thread_count: + self.reset_test_sequence() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + actual_thread_count = len(threads) + + if time.time() > timeout_time: + raise Exception( + 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format( + timeout_seconds, thread_count, actual_thread_count)) + + return threads + + def add_set_breakpoint_packets( + self, + address, + z_packet_type=0, + do_continue=True, + breakpoint_kind=1): + self.test_sequence.add_log_lines( + [ # Set the breakpoint. + "read packet: $Z{2},{0:x},{1}#00".format( + address, breakpoint_kind, z_packet_type), + # Verify the stub could set it. + "send packet: $OK#00", + ], True) + + if (do_continue): + self.test_sequence.add_log_lines( + [ # Continue the inferior. + "read packet: $c#63", + # Expect a breakpoint stop report. + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", + "capture": {1: "stop_signo", + 2: "stop_thread_id"}}, + ], True) + + def add_remove_breakpoint_packets( + self, + address, + z_packet_type=0, + breakpoint_kind=1): + self.test_sequence.add_log_lines( + [ # Remove the breakpoint. + "read packet: $z{2},{0:x},{1}#00".format( + address, breakpoint_kind, z_packet_type), + # Verify the stub could unset it. 
+ "send packet: $OK#00", + ], True) + + def add_qSupported_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qSupported#00", + {"direction": "send", "regex": r"^\$(.*)#[0-9a-fA-F]{2}", "capture": {1: "qSupported_response"}}, + ], True) + + _KNOWN_QSUPPORTED_STUB_FEATURES = [ + "augmented-libraries-svr4-read", + "PacketSize", + "QStartNoAckMode", + "QThreadSuffixSupported", + "QListThreadsInStopReply", + "qXfer:auxv:read", + "qXfer:libraries:read", + "qXfer:libraries-svr4:read", + "qXfer:features:read", + "qEcho", + "QPassSignals" + ] + + def parse_qSupported_response(self, context): + self.assertIsNotNone(context) + + raw_response = context.get("qSupported_response") + self.assertIsNotNone(raw_response) + + # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the + # +,-,? is stripped from the key and set as the value. + supported_dict = {} + for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response): + key = match.group(1) + val = match.group(3) + + # key=val: store as is + if val and len(val) > 0: + supported_dict[key] = val + else: + if len(key) < 2: + raise Exception( + "singular stub feature is too short: must be stub_feature{+,-,?}") + supported_type = key[-1] + key = key[:-1] + if not supported_type in ["+", "-", "?"]: + raise Exception( + "malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type)) + supported_dict[key] = supported_type + # Ensure we know the supported element + if key not in self._KNOWN_QSUPPORTED_STUB_FEATURES: + raise Exception( + "unknown qSupported stub feature reported: %s" % + key) + + return supported_dict + + def run_process_then_stop(self, run_seconds=1): + # Tell the stub to continue. + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8"], + True) + context = self.expect_gdbremote_sequence() + + # Wait for run_seconds. + time.sleep(run_seconds) + + # Send an interrupt, capture a T response. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: {}".format(chr(3)), + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture": {1: "stop_result"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + self.assertIsNotNone(context.get("stop_result")) + + return context + + def select_modifiable_register(self, reg_infos): + """Find a register that can be read/written freely.""" + PREFERRED_REGISTER_NAMES = set(["rax", ]) + + # First check for the first register from the preferred register name + # set. + alternative_register_index = None + + self.assertIsNotNone(reg_infos) + for reg_info in reg_infos: + if ("name" in reg_info) and ( + reg_info["name"] in PREFERRED_REGISTER_NAMES): + # We found a preferred register. Use it. + return reg_info["lldb_register_index"] + if ("generic" in reg_info) and (reg_info["generic"] == "fp" or + reg_info["generic"] == "arg1"): + # A frame pointer or first arg register will do as a + # register to modify temporarily. + alternative_register_index = reg_info["lldb_register_index"] + + # We didn't find a preferred register. Return whatever alternative register + # we found, if any. 
+ return alternative_register_index + + def extract_registers_from_stop_notification(self, stop_key_vals_text): + self.assertIsNotNone(stop_key_vals_text) + kv_dict = self.parse_key_val_dict(stop_key_vals_text) + + registers = {} + for (key, val) in list(kv_dict.items()): + if re.match(r"^[0-9a-fA-F]+$", key): + registers[int(key, 16)] = val + return registers + + def gather_register_infos(self): + self.reset_test_sequence() + self.add_register_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + return reg_infos + + def find_generic_register_with_name(self, reg_infos, generic_name): + self.assertIsNotNone(reg_infos) + for reg_info in reg_infos: + if ("generic" in reg_info) and ( + reg_info["generic"] == generic_name): + return reg_info + return None + + def decode_gdbremote_binary(self, encoded_bytes): + decoded_bytes = "" + i = 0 + while i < len(encoded_bytes): + if encoded_bytes[i] == "}": + # Handle escaped char. + self.assertTrue(i + 1 < len(encoded_bytes)) + decoded_bytes += chr(ord(encoded_bytes[i + 1]) ^ 0x20) + i += 2 + elif encoded_bytes[i] == "*": + # Handle run length encoding. + self.assertTrue(len(decoded_bytes) > 0) + self.assertTrue(i + 1 < len(encoded_bytes)) + repeat_count = ord(encoded_bytes[i + 1]) - 29 + decoded_bytes += decoded_bytes[-1] * repeat_count + i += 2 + else: + decoded_bytes += encoded_bytes[i] + i += 1 + return decoded_bytes + + def build_auxv_dict(self, endian, word_size, auxv_data): + self.assertIsNotNone(endian) + self.assertIsNotNone(word_size) + self.assertIsNotNone(auxv_data) + + auxv_dict = {} + + # PowerPC64le's auxvec has a special key that must be ignored. + # This special key may be used multiple times, resulting in + # multiple key/value pairs with the same key, which would otherwise + # break this test check for repeated keys. 
+ # + # AT_IGNOREPPC = 22 + ignored_keys_for_arch = { 'powerpc64le' : [22] } + arch = self.getArchitecture() + ignore_keys = None + if arch in ignored_keys_for_arch: + ignore_keys = ignored_keys_for_arch[arch] + + while len(auxv_data) > 0: + # Chop off key. + raw_key = auxv_data[:word_size] + auxv_data = auxv_data[word_size:] + + # Chop of value. + raw_value = auxv_data[:word_size] + auxv_data = auxv_data[word_size:] + + # Convert raw text from target endian. + key = unpack_endian_binary_string(endian, raw_key) + value = unpack_endian_binary_string(endian, raw_value) + + if ignore_keys and key in ignore_keys: + continue + + # Handle ending entry. + if key == 0: + self.assertEqual(value, 0) + return auxv_dict + + # The key should not already be present. + self.assertFalse(key in auxv_dict) + auxv_dict[key] = value + + self.fail( + "should not reach here - implies required double zero entry not found") + return auxv_dict + + def read_binary_data_in_chunks(self, command_prefix, chunk_length): + """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned.""" + offset = 0 + done = False + decoded_data = "" + + while not done: + # Grab the next iteration of data. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: ${}{:x},{:x}:#00".format( + command_prefix, + offset, + chunk_length), + { + "direction": "send", + "regex": re.compile( + r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", + re.MULTILINE | re.DOTALL), + "capture": { + 1: "response_type", + 2: "content_raw"}}], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + response_type = context.get("response_type") + self.assertIsNotNone(response_type) + self.assertTrue(response_type in ["l", "m"]) + + # Move offset along. + offset += chunk_length + + # Figure out if we're done. We're done if the response type is l. + done = response_type == "l" + + # Decode binary data. 
+ content_raw = context.get("content_raw")
+ if content_raw and len(content_raw) > 0:
+ self.assertIsNotNone(content_raw)
+ decoded_data += self.decode_gdbremote_binary(content_raw)
+ return decoded_data
+
+ def add_interrupt_packets(self):
+ self.test_sequence.add_log_lines([
+ # Send the interrupt.
+ "read packet: {}".format(chr(3)),
+ # And wait for the stop notification.
+ {"direction": "send",
+ "regex": r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$",
+ "capture": {1: "stop_signo",
+ 2: "stop_key_val_text"}},
+ ], True)
+
+ def parse_interrupt_packets(self, context):
+ self.assertIsNotNone(context.get("stop_signo"))
+ self.assertIsNotNone(context.get("stop_key_val_text"))
+ return (int(context["stop_signo"], 16), self.parse_key_val_dict(
+ context["stop_key_val_text"]))
+
+ def add_QSaveRegisterState_packets(self, thread_id):
+ if thread_id:
+ # Use the thread suffix form.
+ request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(
+ thread_id)
+ else:
+ request = "read packet: $QSaveRegisterState#00"
+
+ self.test_sequence.add_log_lines([request,
+ {"direction": "send",
+ "regex": r"^\$(E?.*)#[0-9a-fA-F]{2}$",
+ "capture": {1: "save_response"}},
+ ],
+ True)
+
+ def parse_QSaveRegisterState_response(self, context):
+ self.assertIsNotNone(context)
+
+ save_response = context.get("save_response")
+ self.assertIsNotNone(save_response)
+
+ if len(save_response) < 1 or save_response[0] == "E":
+ # Error response (e.g. "E42"): the register-state save failed.
+ return (False, None)
+ else:
+ return (True, int(save_response))
+
+ def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
+ if thread_id:
+ # Use the thread suffix form.
+ request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format( + save_id, thread_id) + else: + request = "read packet: $QRestoreRegisterState:{}#00".format( + save_id) + + self.test_sequence.add_log_lines([ + request, + "send packet: $OK#00" + ], True) + + def flip_all_bits_in_each_register_value( + self, reg_infos, endian, thread_id=None): + self.assertIsNotNone(reg_infos) + + successful_writes = 0 + failed_writes = 0 + + for reg_info in reg_infos: + # Use the lldb register index added to the reg info. We're not necessarily + # working off a full set of register infos, so an inferred register + # index could be wrong. + reg_index = reg_info["lldb_register_index"] + self.assertIsNotNone(reg_index) + + reg_byte_size = int(reg_info["bitsize"]) // 8 + self.assertTrue(reg_byte_size > 0) + + # Handle thread suffix. + if thread_id: + p_request = "read packet: $p{:x};thread:{:x}#00".format( + reg_index, thread_id) + else: + p_request = "read packet: $p{:x}#00".format(reg_index) + + # Read the existing value. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the response length. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + initial_reg_value = unpack_register_hex_unsigned( + endian, p_response) + + # Flip the value by xoring with all 1s + all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) // 8) + flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16) + # print("reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)) + + # Handle thread suffix for P. 
+ if thread_id: + P_request = "read packet: $P{:x}={};thread:{:x}#00".format( + reg_index, pack_register_hex( + endian, flipped_bits_int, byte_size=reg_byte_size), thread_id) + else: + P_request = "read packet: $P{:x}={}#00".format( + reg_index, pack_register_hex( + endian, flipped_bits_int, byte_size=reg_byte_size)) + + # Write the flipped value to the register. + self.reset_test_sequence() + self.test_sequence.add_log_lines([P_request, + {"direction": "send", + "regex": r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", + "capture": {1: "P_response"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail + # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them + # all flipping perfectly. + P_response = context.get("P_response") + self.assertIsNotNone(P_response) + if P_response == "OK": + successful_writes += 1 + else: + failed_writes += 1 + # print("reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)) + + # Read back the register value, ensure it matches the flipped + # value. + if P_response == "OK": + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + verify_p_response_raw = context.get("p_response") + self.assertIsNotNone(verify_p_response_raw) + verify_bits = unpack_register_hex_unsigned( + endian, verify_p_response_raw) + + if verify_bits != flipped_bits_int: + # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts. 
+ # print("reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)) + successful_writes -= 1 + failed_writes += 1 + + return (successful_writes, failed_writes) + + def is_bit_flippable_register(self, reg_info): + if not reg_info: + return False + if not "set" in reg_info: + return False + if reg_info["set"] != "General Purpose Registers": + return False + if ("container-regs" in reg_info) and ( + len(reg_info["container-regs"]) > 0): + # Don't try to bit flip registers contained in another register. + return False + if re.match("^.s$", reg_info["name"]): + # This is a 2-letter register name that ends in "s", like a segment register. + # Don't try to bit flip these. + return False + if re.match("^(c|)psr$", reg_info["name"]): + # This is an ARM program status register; don't flip it. + return False + # Okay, this looks fine-enough. + return True + + def read_register_values(self, reg_infos, endian, thread_id=None): + self.assertIsNotNone(reg_infos) + values = {} + + for reg_info in reg_infos: + # We append a register index when load reg infos so we can work + # with subsets. + reg_index = reg_info.get("lldb_register_index") + self.assertIsNotNone(reg_index) + + # Handle thread suffix. + if thread_id: + p_request = "read packet: $p{:x};thread:{:x}#00".format( + reg_index, thread_id) + else: + p_request = "read packet: $p{:x}#00".format(reg_index) + + # Read it with p. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Convert value from target endian to integral. 
+ p_response = context.get("p_response") + self.assertIsNotNone(p_response) + self.assertTrue(len(p_response) > 0) + self.assertFalse(p_response[0] == "E") + + values[reg_index] = unpack_register_hex_unsigned( + endian, p_response) + + return values + + def add_vCont_query_packets(self): + self.test_sequence.add_log_lines(["read packet: $vCont?#49", + {"direction": "send", + "regex": r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", + "capture": {2: "vCont_query_response"}}, + ], + True) + + def parse_vCont_query_response(self, context): + self.assertIsNotNone(context) + vCont_query_response = context.get("vCont_query_response") + + # Handle case of no vCont support at all - in which case the capture + # group will be none or zero length. + if not vCont_query_response or len(vCont_query_response) == 0: + return {} + + return {key: 1 for key in vCont_query_response.split( + ";") if key and len(key) > 0} + + def count_single_steps_until_true( + self, + thread_id, + predicate, + args, + max_step_count=100, + use_Hc_packet=True, + step_instruction="s"): + """Used by single step test that appears in a few different contexts.""" + single_step_count = 0 + + while single_step_count < max_step_count: + self.assertIsNotNone(thread_id) + + # Build the packet for the single step instruction. We replace + # {thread}, if present, with the thread_id. + step_packet = "read packet: ${}#00".format( + re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction)) + # print("\nstep_packet created: {}\n".format(step_packet)) + + # Single step. + self.reset_test_sequence() + if use_Hc_packet: + self.test_sequence.add_log_lines( + [ # Set the continue thread. + "read packet: $Hc{0:x}#00".format(thread_id), + "send packet: $OK#00", + ], True) + self.test_sequence.add_log_lines([ + # Single step. + step_packet, + # "read packet: $vCont;s:{0:x}#00".format(thread_id), + # Expect a breakpoint stop report. 
+ {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", + "capture": {1: "stop_signo", + 2: "stop_thread_id"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + self.assertIsNotNone(context.get("stop_signo")) + self.assertEqual(int(context.get("stop_signo"), 16), + lldbutil.get_signal_number('SIGTRAP')) + + single_step_count += 1 + + # See if the predicate is true. If so, we're done. + if predicate(args): + return (True, single_step_count) + + # The predicate didn't return true within the runaway step count. + return (False, single_step_count) + + def g_c1_c2_contents_are(self, args): + """Used by single step test that appears in a few different contexts.""" + g_c1_address = args["g_c1_address"] + g_c2_address = args["g_c2_address"] + expected_g_c1 = args["expected_g_c1"] + expected_g_c2 = args["expected_g_c2"] + + # Read g_c1 and g_c2 contents. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "g_c1_contents"}}, + "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "g_c2_contents"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Check if what we read from inferior memory is what we are expecting. + self.assertIsNotNone(context.get("g_c1_contents")) + self.assertIsNotNone(context.get("g_c2_contents")) + + return (seven.unhexlify(context.get("g_c1_contents")) == expected_g_c1) and ( + seven.unhexlify(context.get("g_c2_contents")) == expected_g_c2) + + def single_step_only_steps_one_instruction( + self, use_Hc_packet=True, step_instruction="s"): + """Used by single step test that appears in a few different contexts.""" + # Start up the inferior. 
+ procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "get-code-address-hex:swap_chars", + "get-data-address-hex:g_c1", + "get-data-address-hex:g_c2", + "sleep:1", + "call-function:swap_chars", + "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the function call entry point. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$", + "capture": {1: "function_address", 2: "g_c1_address", 3: "g_c2_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the main thread id. + self.assertIsNotNone(context.get("stop_thread_id")) + main_thread_id = int(context.get("stop_thread_id"), 16) + + # Grab the function address. + self.assertIsNotNone(context.get("function_address")) + function_address = int(context.get("function_address"), 16) + + # Grab the data addresses. + self.assertIsNotNone(context.get("g_c1_address")) + g_c1_address = int(context.get("g_c1_address"), 16) + + self.assertIsNotNone(context.get("g_c2_address")) + g_c2_address = int(context.get("g_c2_address"), 16) + + # Set a breakpoint at the given address. 
+ if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + BREAKPOINT_KIND = 4 + else: + BREAKPOINT_KIND = 1 + self.reset_test_sequence() + self.add_set_breakpoint_packets( + function_address, + do_continue=True, + breakpoint_kind=BREAKPOINT_KIND) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Remove the breakpoint. + self.reset_test_sequence() + self.add_remove_breakpoint_packets( + function_address, breakpoint_kind=BREAKPOINT_KIND) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify g_c1 and g_c2 match expected initial state. + args = {} + args["g_c1_address"] = g_c1_address + args["g_c2_address"] = g_c2_address + args["expected_g_c1"] = "0" + args["expected_g_c2"] = "1" + + self.assertTrue(self.g_c1_c2_contents_are(args)) + + # Verify we take only a small number of steps to hit the first state. + # Might need to work through function entry prologue code. + args["expected_g_c1"] = "1" + args["expected_g_c2"] = "1" + (state_reached, + step_count) = self.count_single_steps_until_true(main_thread_id, + self.g_c1_c2_contents_are, + args, + max_step_count=25, + use_Hc_packet=use_Hc_packet, + step_instruction=step_instruction) + self.assertTrue(state_reached) + + # Verify we hit the next state. 
+ args["expected_g_c1"] = "1"
+ args["expected_g_c2"] = "0"
+ (state_reached,
+ step_count) = self.count_single_steps_until_true(main_thread_id,
+ self.g_c1_c2_contents_are,
+ args,
+ max_step_count=5,
+ use_Hc_packet=use_Hc_packet,
+ step_instruction=step_instruction)
+ self.assertTrue(state_reached)
+ expected_step_count = 1
+ arch = self.getArchitecture()
+
+ # MIPS requires 3 machine instructions (ADDIU, SB, LD) to update the
+ # variable's value.
+ if re.match("mips", arch):
+ expected_step_count = 3
+ # S390X requires 2 machine instructions (LARL, MVI) to update the
+ # variable's value.
+ if re.match("s390x", arch):
+ expected_step_count = 2
+ self.assertEqual(step_count, expected_step_count)
+
+ # Verify we hit the next state.
+ args["expected_g_c1"] = "0"
+ args["expected_g_c2"] = "0"
+ (state_reached,
+ step_count) = self.count_single_steps_until_true(main_thread_id,
+ self.g_c1_c2_contents_are,
+ args,
+ max_step_count=5,
+ use_Hc_packet=use_Hc_packet,
+ step_instruction=step_instruction)
+ self.assertTrue(state_reached)
+ self.assertEqual(step_count, expected_step_count)
+
+ # Verify we hit the next state.
+ args["expected_g_c1"] = "0" + args["expected_g_c2"] = "1" + (state_reached, + step_count) = self.count_single_steps_until_true(main_thread_id, + self.g_c1_c2_contents_are, + args, + max_step_count=5, + use_Hc_packet=use_Hc_packet, + step_instruction=step_instruction) + self.assertTrue(state_reached) + self.assertEqual(step_count, expected_step_count) + + def maybe_strict_output_regex(self, regex): + return '.*' + regex + \ + '.*' if lldbplatformutil.hasChattyStderr(self) else '^' + regex + '$' + + def install_and_create_launch_args(self): + exe_path = self.getBuildArtifact("a.out") + if not lldb.remote_platform: + return [exe_path] + remote_path = lldbutil.append_to_process_working_directory(self, + os.path.basename(exe_path)) + remote_file_spec = lldb.SBFileSpec(remote_path, False) + err = lldb.remote_platform.Install(lldb.SBFileSpec(exe_path, True), + remote_file_spec) + if err.Fail(): + raise Exception("remote_platform.Install('%s', '%s') failed: %s" % + (exe_path, remote_path, err)) + return [remote_path] Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/Makefile @@ -0,0 +1,8 @@ +LEVEL = ../../../make + +CFLAGS_EXTRAS += -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS -std=c++11 +# LD_EXTRAS := -lpthread +CXX_SOURCES := main.cpp +MAKE_DSYM :=NO + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py @@ -0,0 +1,46 @@ +from __future__ import 
print_function + + +import gdbremote_testcase +import signal +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def inferior_abort_received(self): + procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"]) + self.assertIsNotNone(procs) + + self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", + "capture": {1: "hex_exit_code"}}, + ], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + hex_exit_code = context.get("hex_exit_code") + self.assertIsNotNone(hex_exit_code) + self.assertEqual(int(hex_exit_code, 16), + lldbutil.get_signal_number('SIGABRT')) + + @debugserver_test + def test_inferior_abort_received_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_abort_received() + + @llgs_test + # std::abort() on <= API 16 raises SIGSEGV - b.android.com/179836 + @expectedFailureAndroid(api_levels=list(range(16 + 1))) + def test_inferior_abort_received_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_abort_received() Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py @@ -0,0 +1,46 @@ +from __future__ import print_function + + +import gdbremote_testcase +import signal +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class 
TestGdbRemoteSegFault(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + GDB_REMOTE_STOP_CODE_BAD_ACCESS = 0x91 + + @skipIfDarwinEmbedded # lldb-server tests not updated to work on ios etc yet + def inferior_seg_fault_received(self, expected_signo): + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["segfault"]) + self.assertIsNotNone(procs) + + self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", + "capture": {1: "hex_exit_code"}}, + ], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + hex_exit_code = context.get("hex_exit_code") + self.assertIsNotNone(hex_exit_code) + self.assertEqual(int(hex_exit_code, 16), expected_signo) + + @debugserver_test + def test_inferior_seg_fault_received_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_seg_fault_received(self.GDB_REMOTE_STOP_CODE_BAD_ACCESS) + + @llgs_test + def test_inferior_seg_fault_received_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_seg_fault_received(lldbutil.get_signal_number('SIGSEGV')) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/main.cpp @@ -0,0 +1,31 @@ +#include +#include +#include + +namespace { +const char *const SEGFAULT_COMMAND = "segfault"; +const char *const ABORT_COMMAND = "abort"; +} + +int main(int argc, char **argv) { + if (argc < 2) { + std::cout << "expected at least one command provided on the command line" + << std::endl; + } + + // Process command line args. 
+ for (int i = 1; i < argc; ++i) { + const char *const command = argv[i]; + if (std::strstr(command, SEGFAULT_COMMAND)) { + // Perform a null pointer access. + int *const null_int_ptr = nullptr; + *null_int_ptr = 0xDEAD; + } else if (std::strstr(command, ABORT_COMMAND)) { + std::abort(); + } else { + std::cout << "Unsupported command: " << command << std::endl; + } + } + + return 0; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/lldbgdbserverutils.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/lldbgdbserverutils.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/lldbgdbserverutils.py @@ -0,0 +1,945 @@ +"""Module for supporting unit testing of the lldb-server debug monitor exe. +""" + +from __future__ import division, print_function + + +import os +import os.path +import platform +import re +import six +import socket_packet_pump +import subprocess +import time +from lldbsuite.test.lldbtest import * + +from six.moves import queue + + +def _get_debug_monitor_from_lldb(lldb_exe, debug_monitor_basename): + """Return the debug monitor exe path given the lldb exe path. + + This method attempts to construct a valid debug monitor exe name + from a given lldb exe name. It will return None if the synthesized + debug monitor name is not found to exist. + + The debug monitor exe path is synthesized by taking the directory + of the lldb exe, and replacing the portion of the base name that + matches "lldb" (case insensitive) and replacing with the value of + debug_monitor_basename. + + Args: + lldb_exe: the path to an lldb executable. + + debug_monitor_basename: the base name portion of the debug monitor + that will replace 'lldb'. + + Returns: + A path to the debug monitor exe if it is found to exist; otherwise, + returns None. 
+ + """ + if not lldb_exe: + return None + + exe_dir = os.path.dirname(lldb_exe) + exe_base = os.path.basename(lldb_exe) + + # we'll rebuild the filename by replacing lldb with + # the debug monitor basename, keeping any prefix or suffix in place. + regex = re.compile(r"lldb", re.IGNORECASE) + new_base = regex.sub(debug_monitor_basename, exe_base) + + debug_monitor_exe = os.path.join(exe_dir, new_base) + if os.path.exists(debug_monitor_exe): + return debug_monitor_exe + + new_base = regex.sub( + 'LLDB.framework/Versions/A/Resources/' + + debug_monitor_basename, + exe_base) + debug_monitor_exe = os.path.join(exe_dir, new_base) + if os.path.exists(debug_monitor_exe): + return debug_monitor_exe + + return None + + +def get_lldb_server_exe(): + """Return the lldb-server exe path. + + Returns: + A path to the lldb-server exe if it is found to exist; otherwise, + returns None. + """ + if "LLDB_DEBUGSERVER_PATH" in os.environ: + return os.environ["LLDB_DEBUGSERVER_PATH"] + + return _get_debug_monitor_from_lldb( + lldbtest_config.lldbExec, "lldb-server") + + +def get_debugserver_exe(): + """Return the debugserver exe path. + + Returns: + A path to the debugserver exe if it is found to exist; otherwise, + returns None. + """ + if "LLDB_DEBUGSERVER_PATH" in os.environ: + return os.environ["LLDB_DEBUGSERVER_PATH"] + + return _get_debug_monitor_from_lldb( + lldbtest_config.lldbExec, "debugserver") + +_LOG_LINE_REGEX = re.compile(r'^(lldb-server|debugserver)\s+<\s*(\d+)>' + + '\s+(read|send)\s+packet:\s+(.+)$') + + +def _is_packet_lldb_gdbserver_input(packet_type, llgs_input_is_read): + """Return whether a given packet is input for lldb-gdbserver. + + Args: + packet_type: a string indicating 'send' or 'receive', from a + gdbremote packet protocol log. + + llgs_input_is_read: true if lldb-gdbserver input (content sent to + lldb-gdbserver) is listed as 'read' or 'send' in the packet + log entry. 
+ + Returns: + True if the packet should be considered input for lldb-gdbserver; False + otherwise. + """ + if packet_type == 'read': + # when llgs is the read side, then a read packet is meant for + # input to llgs (when captured from the llgs/debugserver exe). + return llgs_input_is_read + elif packet_type == 'send': + # when llgs is the send side, then a send packet is meant to + # be input to llgs (when captured from the lldb exe). + return not llgs_input_is_read + else: + # don't understand what type of packet this is + raise "Unknown packet type: {}".format(packet_type) + + +def handle_O_packet(context, packet_contents, logger): + """Handle O packets.""" + if (not packet_contents) or (len(packet_contents) < 1): + return False + elif packet_contents[0] != "O": + return False + elif packet_contents == "OK": + return False + + new_text = gdbremote_hex_decode_string(packet_contents[1:]) + context["O_content"] += new_text + context["O_count"] += 1 + + if logger: + logger.debug( + "text: new \"{}\", cumulative: \"{}\"".format( + new_text, context["O_content"])) + + return True + +_STRIP_CHECKSUM_REGEX = re.compile(r'#[0-9a-fA-F]{2}$') +_STRIP_COMMAND_PREFIX_REGEX = re.compile(r"^\$") +_STRIP_COMMAND_PREFIX_M_REGEX = re.compile(r"^\$m") + + +def assert_packets_equal(asserter, actual_packet, expected_packet): + # strip off the checksum digits of the packet. When we're in + # no-ack mode, the # checksum is ignored, and should not be cause + # for a mismatched packet. + actual_stripped = _STRIP_CHECKSUM_REGEX.sub('', actual_packet) + expected_stripped = _STRIP_CHECKSUM_REGEX.sub('', expected_packet) + asserter.assertEqual(actual_stripped, expected_stripped) + + +def expect_lldb_gdbserver_replay( + asserter, + sock, + test_sequence, + pump_queues, + timeout_seconds, + logger=None): + """Replay socket communication with lldb-gdbserver and verify responses. + + Args: + asserter: the object providing assertEqual(first, second, msg=None), e.g. TestCase instance. 
+ + sock: the TCP socket connected to the lldb-gdbserver exe. + + test_sequence: a GdbRemoteTestSequence instance that describes + the messages sent to the gdb remote and the responses + expected from it. + + timeout_seconds: any response taking more than this number of + seconds will cause an exception to be raised. + + logger: a Python logger instance. + + Returns: + The context dictionary from running the given gdbremote + protocol sequence. This will contain any of the capture + elements specified to any GdbRemoteEntry instances in + test_sequence. + + The context will also contain an entry, context["O_content"] + which contains the text from the inferior received via $O + packets. $O packets should not attempt to be matched + directly since they are not entirely deterministic as to + how many arrive and how much text is in each one. + + context["O_count"] will contain an integer of the number of + O packets received. + """ + + # Ensure we have some work to do. + if len(test_sequence.entries) < 1: + return {} + + context = {"O_count": 0, "O_content": ""} + with socket_packet_pump.SocketPacketPump(sock, pump_queues, logger) as pump: + # Grab the first sequence entry. + sequence_entry = test_sequence.entries.pop(0) + + # While we have an active sequence entry, send messages + # destined for the stub and collect/match/process responses + # expected from the stub. + while sequence_entry: + if sequence_entry.is_send_to_remote(): + # This is an entry to send to the remote debug monitor. + send_packet = sequence_entry.get_send_packet() + if logger: + if len(send_packet) == 1 and send_packet[0] == chr(3): + packet_desc = "^C" + else: + packet_desc = send_packet + logger.info( + "sending packet to remote: {}".format(packet_desc)) + sock.sendall(send_packet.encode()) + else: + # This is an entry expecting to receive content from the remote + # debug monitor. + + # We'll pull from (and wait on) the queue appropriate for the type of matcher. 
+ # We keep separate queues for process output (coming from non-deterministic + # $O packet division) and for all other packets. + if sequence_entry.is_output_matcher(): + try: + # Grab next entry from the output queue. + content = pump_queues.output_queue().get(True, timeout_seconds) + except queue.Empty: + if logger: + logger.warning( + "timeout waiting for stub output (accumulated output:{})".format( + pump.get_accumulated_output())) + raise Exception( + "timed out while waiting for output match (accumulated output: {})".format( + pump.get_accumulated_output())) + else: + try: + content = pump_queues.packet_queue().get(True, timeout_seconds) + except queue.Empty: + if logger: + logger.warning( + "timeout waiting for packet match (receive buffer: {})".format( + pump.get_receive_buffer())) + raise Exception( + "timed out while waiting for packet match (receive buffer: {})".format( + pump.get_receive_buffer())) + + # Give the sequence entry the opportunity to match the content. + # Output matchers might match or pass after more output accumulates. + # Other packet types generally must match. + asserter.assertIsNotNone(content) + context = sequence_entry.assert_match( + asserter, content, context=context) + + # Move on to next sequence entry as needed. Some sequence entries support executing multiple + # times in different states (for looping over query/response + # packets). + if sequence_entry.is_consumed(): + if len(test_sequence.entries) > 0: + sequence_entry = test_sequence.entries.pop(0) + else: + sequence_entry = None + + # Fill in the O_content entries. 
+ context["O_count"] = 1
+ context["O_content"] = pump.get_accumulated_output()
+
+ return context
+
+
+def gdbremote_hex_encode_string(str):
+ # NOTE(review): the parameter name shadows the builtin 'str'.
+ output = ''
+ for c in str:
+ output += '{0:02x}'.format(ord(c))
+ return output
+
+
+def gdbremote_hex_decode_string(str):
+ # NOTE(review): str.decode("hex") exists on Python 2 only; on Python 3
+ # this raises AttributeError. binascii.unhexlify would be portable —
+ # confirm which interpreter versions must be supported before changing.
+ return str.decode("hex")
+
+
+def gdbremote_packet_encode_string(str):
+ # Frame a payload as a gdb-remote packet: $<payload>#<2-hex-digit checksum>,
+ # where the checksum is the modulo-256 sum of the payload bytes.
+ checksum = 0
+ for c in str:
+ checksum += ord(c)
+ return '$' + str + '#{0:02x}'.format(checksum % 256)
+
+
+def build_gdbremote_A_packet(args_list):
+ """Given a list of args, create a properly-formed $A packet containing each arg.
+ """
+ payload = "A"
+
+ # build the arg content
+ arg_index = 0
+ for arg in args_list:
+ # Comma-separate the args.
+ if arg_index > 0:
+ payload += ','
+
+ # Hex-encode the arg.
+ hex_arg = gdbremote_hex_encode_string(arg)
+
+ # Build the A entry.
+ payload += "{},{},{}".format(len(hex_arg), arg_index, hex_arg)
+
+ # Next arg index, please.
+ arg_index += 1
+
+ # return the packetized payload
+ return gdbremote_packet_encode_string(payload)
+
+
+def parse_reg_info_response(response_packet):
+ if not response_packet:
+ raise Exception("response_packet cannot be None")
+
+ # Strip off prefix $ and suffix #xx if present.
+ response_packet = _STRIP_COMMAND_PREFIX_REGEX.sub("", response_packet)
+ response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet)
+
+ # Build keyval pairs
+ values = {}
+ for kv in response_packet.split(";"):
+ if len(kv) < 1:
+ continue
+ (key, val) = kv.split(':')
+ values[key] = val
+
+ return values
+
+
+def parse_threadinfo_response(response_packet):
+ if not response_packet:
+ raise Exception("response_packet cannot be None")
+
+ # Strip off prefix $ and suffix #xx if present.
+ response_packet = _STRIP_COMMAND_PREFIX_M_REGEX.sub("", response_packet) + response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) + + # Return list of thread ids + return [int(thread_id_hex, 16) for thread_id_hex in response_packet.split( + ",") if len(thread_id_hex) > 0] + + +def unpack_endian_binary_string(endian, value_string): + """Unpack a gdb-remote binary (post-unescaped, i.e. not escaped) response to an unsigned int given endianness of the inferior.""" + if not endian: + raise Exception("endian cannot be None") + if not value_string or len(value_string) < 1: + raise Exception("value_string cannot be None or empty") + + if endian == 'little': + value = 0 + i = 0 + while len(value_string) > 0: + value += (ord(value_string[0]) << i) + value_string = value_string[1:] + i += 8 + return value + elif endian == 'big': + value = 0 + while len(value_string) > 0: + value = (value << 8) + ord(value_string[0]) + value_string = value_string[1:] + return value + else: + # pdp is valid but need to add parse code once needed. + raise Exception("unsupported endian:{}".format(endian)) + + +def unpack_register_hex_unsigned(endian, value_string): + """Unpack a gdb-remote $p-style response to an unsigned int given endianness of inferior.""" + if not endian: + raise Exception("endian cannot be None") + if not value_string or len(value_string) < 1: + raise Exception("value_string cannot be None or empty") + + if endian == 'little': + value = 0 + i = 0 + while len(value_string) > 0: + value += (int(value_string[0:2], 16) << i) + value_string = value_string[2:] + i += 8 + return value + elif endian == 'big': + return int(value_string, 16) + else: + # pdp is valid but need to add parse code once needed. 
+        raise Exception("unsupported endian:{}".format(endian))
+
+
+def pack_register_hex(endian, value, byte_size=None):
+    """Pack an unsigned int into a gdb-remote $p-style hex string given endianness of inferior."""
+    if not endian:
+        raise Exception("endian cannot be None")
+
+    if endian == 'little':
+        # Create the little-endian return value.
+        retval = ""
+        while value != 0:
+            retval = retval + "{:02x}".format(value & 0xff)
+            value = value >> 8
+        if byte_size:
+            # Add zero-fill to the right/end (MSB side) of the value.
+            retval += "00" * (byte_size - len(retval) // 2)
+        return retval
+
+    elif endian == 'big':
+        retval = ""
+        while value != 0:
+            retval = "{:02x}".format(value & 0xff) + retval
+            value = value >> 8
+        if byte_size:
+            # Add zero-fill to the left/front (MSB side) of the value.
+            retval = ("00" * (byte_size - len(retval) // 2)) + retval
+        return retval
+
+    else:
+        # pdp is valid but need to add parse code once needed.
+        raise Exception("unsupported endian:{}".format(endian))
+
+
+class GdbRemoteEntryBase(object):
+
+    def is_output_matcher(self):
+        return False
+
+
+class GdbRemoteEntry(GdbRemoteEntryBase):
+
+    def __init__(
+            self,
+            is_send_to_remote=True,
+            exact_payload=None,
+            regex=None,
+            capture=None,
+            expect_captures=None):
+        """Create an entry representing one piece of the I/O to/from a gdb remote debug monitor.
+
+        Args:
+
+            is_send_to_remote: True if this entry is a message to be
+                sent to the gdbremote debug monitor; False if this
+                entry represents text to be matched against the reply
+                from the gdbremote debug monitor.
+
+            exact_payload: if not None, then this packet is an exact
+                send (when sending to the remote) or an exact match of
+                the response from the gdbremote. The checksums are
+                ignored on exact match requests since negotiation of
+                no-ack makes the checksum content essentially
+                undefined.
+
+            regex: currently only valid for receives from gdbremote.
+ When specified (and only if exact_payload is None), + indicates the gdbremote response must match the given + regex. Match groups in the regex can be used for two + different purposes: saving the match (see capture + arg), or validating that a match group matches a + previously established value (see expect_captures). It + is perfectly valid to have just a regex arg and to + specify neither capture or expect_captures args. This + arg only makes sense if exact_payload is not + specified. + + capture: if specified, is a dictionary of regex match + group indices (should start with 1) to variable names + that will store the capture group indicated by the + index. For example, {1:"thread_id"} will store capture + group 1's content in the context dictionary where + "thread_id" is the key and the match group value is + the value. The value stored off can be used later in a + expect_captures expression. This arg only makes sense + when regex is specified. + + expect_captures: if specified, is a dictionary of regex + match group indices (should start with 1) to variable + names, where the match group should match the value + existing in the context at the given variable name. + For example, {2:"thread_id"} indicates that the second + match group must match the value stored under the + context's previously stored "thread_id" key. This arg + only makes sense when regex is specified. + """ + self._is_send_to_remote = is_send_to_remote + self.exact_payload = exact_payload + self.regex = regex + self.capture = capture + self.expect_captures = expect_captures + + def is_send_to_remote(self): + return self._is_send_to_remote + + def is_consumed(self): + # For now, all packets are consumed after first use. 
+ return True + + def get_send_packet(self): + if not self.is_send_to_remote(): + raise Exception( + "get_send_packet() called on GdbRemoteEntry that is not a send-to-remote packet") + if not self.exact_payload: + raise Exception( + "get_send_packet() called on GdbRemoteEntry but it doesn't have an exact payload") + return self.exact_payload + + def _assert_exact_payload_match(self, asserter, actual_packet): + assert_packets_equal(asserter, actual_packet, self.exact_payload) + return None + + def _assert_regex_match(self, asserter, actual_packet, context): + # Ensure the actual packet matches from the start of the actual packet. + match = self.regex.match(actual_packet) + if not match: + asserter.fail( + "regex '{}' failed to match against content '{}'".format( + self.regex.pattern, actual_packet)) + + if self.capture: + # Handle captures. + for group_index, var_name in list(self.capture.items()): + capture_text = match.group(group_index) + # It is okay for capture text to be None - which it will be if it is a group that can match nothing. + # The user must be okay with it since the regex itself matched + # above. + context[var_name] = capture_text + + if self.expect_captures: + # Handle comparing matched groups to context dictionary entries. + for group_index, var_name in list(self.expect_captures.items()): + capture_text = match.group(group_index) + if not capture_text: + raise Exception( + "No content to expect for group index {}".format(group_index)) + asserter.assertEqual(capture_text, context[var_name]) + + return context + + def assert_match(self, asserter, actual_packet, context=None): + # This only makes sense for matching lines coming from the + # remote debug monitor. + if self.is_send_to_remote(): + raise Exception( + "Attempted to match a packet being sent to the remote debug monitor, doesn't make sense.") + + # Create a new context if needed. 
+ if not context: + context = {} + + # If this is an exact payload, ensure they match exactly, + # ignoring the packet checksum which is optional for no-ack + # mode. + if self.exact_payload: + self._assert_exact_payload_match(asserter, actual_packet) + return context + elif self.regex: + return self._assert_regex_match(asserter, actual_packet, context) + else: + raise Exception( + "Don't know how to match a remote-sent packet when exact_payload isn't specified.") + + +class MultiResponseGdbRemoteEntry(GdbRemoteEntryBase): + """Represents a query/response style packet. + + Assumes the first item is sent to the gdb remote. + An end sequence regex indicates the end of the query/response + packet sequence. All responses up through (but not including) the + end response are stored in a context variable. + + Settings accepted from params: + + next_query or query: required. The typical query packet without the $ prefix or #xx suffix. + If there is a special first packet to start the iteration query, see the + first_query key. + + first_query: optional. If the first query requires a special query command, specify + it with this key. Do not specify the $ prefix or #xx suffix. + + append_iteration_suffix: defaults to False. Specify True if the 0-based iteration + index should be appended as a suffix to the command. e.g. qRegisterInfo with + this key set true will generate query packets of qRegisterInfo0, qRegisterInfo1, + etc. + + end_regex: required. Specifies a compiled regex object that will match the full text + of any response that signals an end to the iteration. It must include the + initial $ and ending #xx and must match the whole packet. + + save_key: required. Specifies the key within the context where an array will be stored. + Each packet received from the gdb remote that does not match the end_regex will get + appended to the array stored within the context at that key. + + runaway_response_count: optional. Defaults to 10000. 
If this many responses are retrieved,
+        assume there is something wrong with either the response collection or the ending
+        detection regex and throw an exception.
+    """
+
+    def __init__(self, params):
+        self._next_query = params.get("next_query", params.get("query"))
+        if not self._next_query:
+            raise Exception(
+                "either next_query or query key must be specified for MultiResponseGdbRemoteEntry")
+
+        self._first_query = params.get("first_query", self._next_query)
+        self._append_iteration_suffix = params.get(
+            "append_iteration_suffix", False)
+        self._iteration = 0
+        self._end_regex = params["end_regex"]
+        self._save_key = params["save_key"]
+        self._runaway_response_count = params.get(
+            "runaway_response_count", 10000)
+        self._is_send_to_remote = True
+        self._end_matched = False
+
+    def is_send_to_remote(self):
+        return self._is_send_to_remote
+
+    def get_send_packet(self):
+        if not self.is_send_to_remote():
+            raise Exception(
+                "get_send_packet() called on MultiResponseGdbRemoteEntry that is not in the send state")
+        if self._end_matched:
+            raise Exception(
+                "get_send_packet() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.")
+
+        # Choose the first or next query for the base payload.
+        if self._iteration == 0 and self._first_query:
+            payload = self._first_query
+        else:
+            payload = self._next_query
+
+        # Append the suffix as needed.
+        if self._append_iteration_suffix:
+            payload += "%x" % self._iteration
+
+        # Keep track of the iteration.
+        self._iteration += 1
+
+        # Now that we've given the query packet, flip the mode to
+        # receive/match.
+        self._is_send_to_remote = False
+
+        # Return the result, converted to packet form.
+        return gdbremote_packet_encode_string(payload)
+
+    def is_consumed(self):
+        return self._end_matched
+
+    def assert_match(self, asserter, actual_packet, context=None):
+        # This only makes sense for matching lines coming from the remote debug
+        # monitor.
+ if self.is_send_to_remote(): + raise Exception( + "assert_match() called on MultiResponseGdbRemoteEntry but state is set to send a query packet.") + + if self._end_matched: + raise Exception( + "assert_match() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.") + + # Set up a context as needed. + if not context: + context = {} + + # Check if the packet matches the end condition. + match = self._end_regex.match(actual_packet) + if match: + # We're done iterating. + self._end_matched = True + return context + + # Not done iterating - save the packet. + context[self._save_key] = context.get(self._save_key, []) + context[self._save_key].append(actual_packet) + + # Check for a runaway response cycle. + if len(context[self._save_key]) >= self._runaway_response_count: + raise Exception( + "runaway query/response cycle detected: %d responses captured so far. Last response: %s" % + (len( + context[ + self._save_key]), context[ + self._save_key][ + -1])) + + # Flip the mode to send for generating the query. + self._is_send_to_remote = True + return context + + +class MatchRemoteOutputEntry(GdbRemoteEntryBase): + """Waits for output from the debug monitor to match a regex or time out. + + This entry type tries to match each time new gdb remote output is accumulated + using a provided regex. If the output does not match the regex within the + given timeframe, the command fails the playback session. If the regex does + match, any capture fields are recorded in the context. + + Settings accepted from params: + + regex: required. Specifies a compiled regex object that must either succeed + with re.match or re.search (see regex_mode below) within the given timeout + (see timeout_seconds below) or cause the playback to fail. + + regex_mode: optional. Available values: "match" or "search". If "match", the entire + stub output as collected so far must match the regex. 
If search, then the regex + must match starting somewhere within the output text accumulated thus far. + Default: "match" (i.e. the regex must match the entirety of the accumulated output + buffer, so unexpected text will generally fail the match). + + capture: optional. If specified, is a dictionary of regex match group indices (should start + with 1) to variable names that will store the capture group indicated by the + index. For example, {1:"thread_id"} will store capture group 1's content in the + context dictionary where "thread_id" is the key and the match group value is + the value. The value stored off can be used later in a expect_captures expression. + This arg only makes sense when regex is specified. + """ + + def __init__(self, regex=None, regex_mode="match", capture=None): + self._regex = regex + self._regex_mode = regex_mode + self._capture = capture + self._matched = False + + if not self._regex: + raise Exception("regex cannot be None") + + if not self._regex_mode in ["match", "search"]: + raise Exception( + "unsupported regex mode \"{}\": must be \"match\" or \"search\"".format( + self._regex_mode)) + + def is_output_matcher(self): + return True + + def is_send_to_remote(self): + # This is always a "wait for remote" command. + return False + + def is_consumed(self): + return self._matched + + def assert_match(self, asserter, accumulated_output, context): + # Validate args. + if not accumulated_output: + raise Exception("accumulated_output cannot be none") + if not context: + raise Exception("context cannot be none") + + # Validate that we haven't already matched. + if self._matched: + raise Exception( + "invalid state - already matched, attempting to match again") + + # If we don't have any content yet, we don't match. 
+ if len(accumulated_output) < 1: + return context + + # Check if we match + if self._regex_mode == "match": + match = self._regex.match(accumulated_output) + elif self._regex_mode == "search": + match = self._regex.search(accumulated_output) + else: + raise Exception( + "Unexpected regex mode: {}".format( + self._regex_mode)) + + # If we don't match, wait to try again after next $O content, or time + # out. + if not match: + # print("re pattern \"{}\" did not match against \"{}\"".format(self._regex.pattern, accumulated_output)) + return context + + # We do match. + self._matched = True + # print("re pattern \"{}\" matched against \"{}\"".format(self._regex.pattern, accumulated_output)) + + # Collect up any captures into the context. + if self._capture: + # Handle captures. + for group_index, var_name in list(self._capture.items()): + capture_text = match.group(group_index) + if not capture_text: + raise Exception( + "No content for group index {}".format(group_index)) + context[var_name] = capture_text + + return context + + +class GdbRemoteTestSequence(object): + + _LOG_LINE_REGEX = re.compile(r'^.*(read|send)\s+packet:\s+(.+)$') + + def __init__(self, logger): + self.entries = [] + self.logger = logger + + def add_log_lines(self, log_lines, remote_input_is_read): + for line in log_lines: + if isinstance(line, str): + # Handle log line import + # if self.logger: + # self.logger.debug("processing log line: {}".format(line)) + match = self._LOG_LINE_REGEX.match(line) + if match: + playback_packet = match.group(2) + direction = match.group(1) + if _is_packet_lldb_gdbserver_input( + direction, remote_input_is_read): + # Handle as something to send to the remote debug monitor. + # if self.logger: + # self.logger.info("processed packet to send to remote: {}".format(playback_packet)) + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=True, + exact_payload=playback_packet)) + else: + # Log line represents content to be expected from the remote debug monitor. 
+ # if self.logger: + # self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet)) + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=False, + exact_payload=playback_packet)) + else: + raise Exception( + "failed to interpret log line: {}".format(line)) + elif isinstance(line, dict): + entry_type = line.get("type", "regex_capture") + if entry_type == "regex_capture": + # Handle more explicit control over details via dictionary. + direction = line.get("direction", None) + regex = line.get("regex", None) + capture = line.get("capture", None) + expect_captures = line.get("expect_captures", None) + + # Compile the regex. + if regex and (isinstance(regex, str)): + regex = re.compile(regex) + + if _is_packet_lldb_gdbserver_input( + direction, remote_input_is_read): + # Handle as something to send to the remote debug monitor. + # if self.logger: + # self.logger.info("processed dict sequence to send to remote") + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=True, + regex=regex, + capture=capture, + expect_captures=expect_captures)) + else: + # Log line represents content to be expected from the remote debug monitor. + # if self.logger: + # self.logger.info("processed dict sequence to match receiving from remote") + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=False, + regex=regex, + capture=capture, + expect_captures=expect_captures)) + elif entry_type == "multi_response": + self.entries.append(MultiResponseGdbRemoteEntry(line)) + elif entry_type == "output_match": + + regex = line.get("regex", None) + # Compile the regex. 
+ if regex and (isinstance(regex, str)): + regex = re.compile(regex, re.DOTALL) + + regex_mode = line.get("regex_mode", "match") + capture = line.get("capture", None) + self.entries.append( + MatchRemoteOutputEntry( + regex=regex, + regex_mode=regex_mode, + capture=capture)) + else: + raise Exception("unknown entry type \"%s\"" % entry_type) + + +def process_is_running(pid, unknown_value=True): + """If possible, validate that the given pid represents a running process on the local system. + + Args: + + pid: an OS-specific representation of a process id. Should be an integral value. + + unknown_value: value used when we cannot determine how to check running local + processes on the OS. + + Returns: + + If we can figure out how to check running process ids on the given OS: + return True if the process is running, or False otherwise. + + If we don't know how to check running process ids on the given OS: + return the value provided by the unknown_value arg. + """ + if not isinstance(pid, six.integer_types): + raise Exception( + "pid must be an integral type (actual type: %s)" % str( + type(pid))) + + process_ids = [] + + if lldb.remote_platform: + # Don't know how to get list of running process IDs on a remote + # platform + return unknown_value + elif platform.system() in ['Darwin', 'Linux', 'FreeBSD', 'NetBSD']: + # Build the list of running process ids + output = subprocess.check_output( + "ps ax | awk '{ print $1; }'", shell=True).decode("utf-8") + text_process_ids = output.split('\n')[1:] + # Convert text pids to ints + process_ids = [int(text_pid) + for text_pid in text_process_ids if text_pid != ''] + # elif {your_platform_here}: + # fill in process_ids as a list of int type process IDs running on + # the local system. + else: + # Don't know how to get list of running process IDs on this + # OS, so return the "don't know" value. 
+ return unknown_value + + # Check if the pid is in the process_ids + return pid in process_ids + +if __name__ == '__main__': + EXE_PATH = get_lldb_server_exe() + if EXE_PATH: + print("lldb-server path detected: {}".format(EXE_PATH)) + else: + print("lldb-server could not be found") Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/main.cpp @@ -0,0 +1,370 @@ +//===-- main.cpp ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(_WIN32) +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#if defined(__APPLE__) +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2) +int pthread_threadid_np(pthread_t, __uint64_t *); +#elif defined(__linux__) +#include +#elif defined(__NetBSD__) +#include +#elif defined(_WIN32) +#include +#endif + +static const char *const RETVAL_PREFIX = "retval:"; +static const char *const SLEEP_PREFIX = "sleep:"; +static const char *const STDERR_PREFIX = "stderr:"; +static const char *const SET_MESSAGE_PREFIX = "set-message:"; +static const char *const PRINT_MESSAGE_COMMAND = "print-message:"; +static const char *const GET_DATA_ADDRESS_PREFIX = "get-data-address-hex:"; +static const char *const GET_STACK_ADDRESS_COMMAND = "get-stack-address-hex:"; +static const char *const GET_HEAP_ADDRESS_COMMAND = "get-heap-address-hex:"; + +static const char *const GET_CODE_ADDRESS_PREFIX 
= "get-code-address-hex:"; +static const char *const CALL_FUNCTION_PREFIX = "call-function:"; + +static const char *const THREAD_PREFIX = "thread:"; +static const char *const THREAD_COMMAND_NEW = "new"; +static const char *const THREAD_COMMAND_PRINT_IDS = "print-ids"; +static const char *const THREAD_COMMAND_SEGFAULT = "segfault"; + +static const char *const PRINT_PID_COMMAND = "print-pid"; + +static bool g_print_thread_ids = false; +static std::mutex g_print_mutex; +static bool g_threads_do_segfault = false; + +static std::mutex g_jump_buffer_mutex; +static jmp_buf g_jump_buffer; +static bool g_is_segfaulting = false; + +static char g_message[256]; + +static volatile char g_c1 = '0'; +static volatile char g_c2 = '1'; + +static void print_pid() { +#if defined(_WIN32) + fprintf(stderr, "PID: %d\n", ::GetCurrentProcessId()); +#else + fprintf(stderr, "PID: %d\n", getpid()); +#endif +} + +static void print_thread_id() { +// Put in the right magic here for your platform to spit out the thread id (tid) +// that debugserver/lldb-gdbserver would see as a TID. Otherwise, let the else +// clause print out the unsupported text so that the unit test knows to skip +// verifying thread ids. +#if defined(__APPLE__) + __uint64_t tid = 0; + pthread_threadid_np(pthread_self(), &tid); + printf("%" PRIx64, tid); +#elif defined(__linux__) + // This is a call to gettid() via syscall. + printf("%" PRIx64, static_cast(syscall(__NR_gettid))); +#elif defined(__NetBSD__) + // Technically lwpid_t is 32-bit signed integer + printf("%" PRIx64, static_cast(_lwp_self())); +#elif defined(_WIN32) + printf("%" PRIx64, static_cast(::GetCurrentThreadId())); +#else + printf("{no-tid-support}"); +#endif +} + +static void signal_handler(int signo) { +#if defined(_WIN32) + // No signal support on Windows. 
+#else + const char *signal_name = nullptr; + switch (signo) { + case SIGUSR1: + signal_name = "SIGUSR1"; + break; + case SIGSEGV: + signal_name = "SIGSEGV"; + break; + default: + signal_name = nullptr; + } + + // Print notice that we received the signal on a given thread. + { + std::lock_guard lock(g_print_mutex); + if (signal_name) + printf("received %s on thread id: ", signal_name); + else + printf("received signo %d (%s) on thread id: ", signo, strsignal(signo)); + print_thread_id(); + printf("\n"); + } + + // Reset the signal handler if we're one of the expected signal handlers. + switch (signo) { + case SIGSEGV: + if (g_is_segfaulting) { + // Fix up the pointer we're writing to. This needs to happen if nothing + // intercepts the SIGSEGV (i.e. if somebody runs this from the command + // line). + longjmp(g_jump_buffer, 1); + } + break; + case SIGUSR1: + if (g_is_segfaulting) { + // Fix up the pointer we're writing to. This is used to test gdb remote + // signal delivery. A SIGSEGV will be raised when the thread is created, + // switched out for a SIGUSR1, and then this code still needs to fix the + // seg fault. (i.e. if somebody runs this from the command line). + longjmp(g_jump_buffer, 1); + } + break; + } + + // Reset the signal handler. 
+ sig_t sig_result = signal(signo, signal_handler); + if (sig_result == SIG_ERR) { + fprintf(stderr, "failed to set signal handler: errno=%d\n", errno); + exit(1); + } +#endif +} + +static void swap_chars() { + g_c1 = '1'; + g_c2 = '0'; + + g_c1 = '0'; + g_c2 = '1'; +} + +static void hello() { + std::lock_guard lock(g_print_mutex); + printf("hello, world\n"); +} + +static void *thread_func(void *arg) { + static std::atomic s_thread_index(1); + const int this_thread_index = s_thread_index++; + if (g_print_thread_ids) { + std::lock_guard lock(g_print_mutex); + printf("thread %d id: ", this_thread_index); + print_thread_id(); + printf("\n"); + } + + if (g_threads_do_segfault) { + // Sleep for a number of seconds based on the thread index. + // TODO add ability to send commands to test exe so we can + // handle timing more precisely. This is clunky. All we're + // trying to do is add predictability as to the timing of + // signal generation by created threads. + int sleep_seconds = 2 * (this_thread_index - 1); + std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds)); + + // Test creating a SEGV. + { + std::lock_guard lock(g_jump_buffer_mutex); + g_is_segfaulting = true; + int *bad_p = nullptr; + if (setjmp(g_jump_buffer) == 0) { + // Force a seg fault signal on this thread. + *bad_p = 0; + } else { + // Tell the system we're no longer seg faulting. + // Used by the SIGUSR1 signal handler that we inject + // in place of the SIGSEGV so it only tries to + // recover from the SIGSEGV if this seg fault code + // was in play. 
+        g_is_segfaulting = false;
+      }
+    }
+
+    {
+      std::lock_guard<std::mutex> lock(g_print_mutex);
+      printf("thread ");
+      print_thread_id();
+      printf(": past SIGSEGV\n");
+    }
+  }
+
+  int sleep_seconds_remaining = 60;
+  std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds_remaining));
+
+  return nullptr;
+}
+
+int main(int argc, char **argv) {
+  lldb_enable_attach();
+
+  std::vector<std::thread> threads;
+  std::unique_ptr<uint8_t[]> heap_array_up;
+  int return_value = 0;
+
+#if !defined(_WIN32)
+  // Set the signal handler.
+  sig_t sig_result = signal(SIGALRM, signal_handler);
+  if (sig_result == SIG_ERR) {
+    fprintf(stderr, "failed to set SIGALRM signal handler: errno=%d\n", errno);
+    exit(1);
+  }
+
+  sig_result = signal(SIGUSR1, signal_handler);
+  if (sig_result == SIG_ERR) {
+    fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno);
+    exit(1);
+  }
+
+  sig_result = signal(SIGSEGV, signal_handler);
+  if (sig_result == SIG_ERR) {
+    fprintf(stderr, "failed to set SIGSEGV handler: errno=%d\n", errno);
+    exit(1);
+  }
+#endif
+
+  // Process command line args.
+  for (int i = 1; i < argc; ++i) {
+    if (std::strstr(argv[i], STDERR_PREFIX)) {
+      // Treat remainder as text to go to stderr.
+      fprintf(stderr, "%s\n", (argv[i] + strlen(STDERR_PREFIX)));
+    } else if (std::strstr(argv[i], RETVAL_PREFIX)) {
+      // Treat as the return value for the program.
+      return_value = std::atoi(argv[i] + strlen(RETVAL_PREFIX));
+    } else if (std::strstr(argv[i], SLEEP_PREFIX)) {
+      // Treat as the amount of time to have this process sleep (in seconds).
+      int sleep_seconds_remaining = std::atoi(argv[i] + strlen(SLEEP_PREFIX));
+
+      // Loop around, sleeping until all sleep time is used up. Note that
+      // signals will cause sleep to end early with the number of seconds
+      // remaining.
+      std::this_thread::sleep_for(
+          std::chrono::seconds(sleep_seconds_remaining));
+
+    } else if (std::strstr(argv[i], SET_MESSAGE_PREFIX)) {
+      // Copy the contents after "set-message:" to the g_message buffer.
+      // Used for reading inferior memory and verifying contents match
+      // expectations.
+      strncpy(g_message, argv[i] + strlen(SET_MESSAGE_PREFIX),
+              sizeof(g_message));
+
+      // Ensure we're null terminated.
+      g_message[sizeof(g_message) - 1] = '\0';
+
+    } else if (std::strstr(argv[i], PRINT_MESSAGE_COMMAND)) {
+      std::lock_guard<std::mutex> lock(g_print_mutex);
+      printf("message: %s\n", g_message);
+    } else if (std::strstr(argv[i], GET_DATA_ADDRESS_PREFIX)) {
+      volatile void *data_p = nullptr;
+
+      if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_message"))
+        data_p = &g_message[0];
+      else if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_c1"))
+        data_p = &g_c1;
+      else if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_c2"))
+        data_p = &g_c2;
+
+      std::lock_guard<std::mutex> lock(g_print_mutex);
+      printf("data address: %p\n", data_p);
+    } else if (std::strstr(argv[i], GET_HEAP_ADDRESS_COMMAND)) {
+      // Create a byte array if not already present.
+      if (!heap_array_up)
+        heap_array_up.reset(new uint8_t[32]);
+
+      std::lock_guard<std::mutex> lock(g_print_mutex);
+      printf("heap address: %p\n", heap_array_up.get());
+
+    } else if (std::strstr(argv[i], GET_STACK_ADDRESS_COMMAND)) {
+      std::lock_guard<std::mutex> lock(g_print_mutex);
+      printf("stack address: %p\n", &return_value);
+    } else if (std::strstr(argv[i], GET_CODE_ADDRESS_PREFIX)) {
+      void (*func_p)() = nullptr;
+
+      if (std::strstr(argv[i] + strlen(GET_CODE_ADDRESS_PREFIX), "hello"))
+        func_p = hello;
+      else if (std::strstr(argv[i] + strlen(GET_CODE_ADDRESS_PREFIX),
+                           "swap_chars"))
+        func_p = swap_chars;
+
+      std::lock_guard<std::mutex> lock(g_print_mutex);
+      printf("code address: %p\n", func_p);
+    } else if (std::strstr(argv[i], CALL_FUNCTION_PREFIX)) {
+      void (*func_p)() = nullptr;
+
+      // Default: no function selected; unknown names are reported below.
+ if (std::strcmp(argv[i] + strlen(CALL_FUNCTION_PREFIX), "hello") == 0) + func_p = hello; + else if (std::strcmp(argv[i] + strlen(CALL_FUNCTION_PREFIX), + "swap_chars") == 0) + func_p = swap_chars; + else { + std::lock_guard lock(g_print_mutex); + printf("unknown function: %s\n", + argv[i] + strlen(CALL_FUNCTION_PREFIX)); + } + if (func_p) + func_p(); + } else if (std::strstr(argv[i], THREAD_PREFIX)) { + // Check if we're creating a new thread. + if (std::strstr(argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_NEW)) { + threads.push_back(std::thread(thread_func, nullptr)); + } else if (std::strstr(argv[i] + strlen(THREAD_PREFIX), + THREAD_COMMAND_PRINT_IDS)) { + // Turn on thread id announcing. + g_print_thread_ids = true; + + // And announce us. + { + std::lock_guard lock(g_print_mutex); + printf("thread 0 id: "); + print_thread_id(); + printf("\n"); + } + } else if (std::strstr(argv[i] + strlen(THREAD_PREFIX), + THREAD_COMMAND_SEGFAULT)) { + g_threads_do_segfault = true; + } else { + // At this point we don't do anything else with threads. + // Later use thread index and send command to thread. + } + } else if (std::strstr(argv[i], PRINT_PID_COMMAND)) { + print_pid(); + } else { + // Treat the argument as text for stdout. 
+ printf("%s\n", argv[i]); + } + } + + // If we launched any threads, join them + for (std::vector::iterator it = threads.begin(); + it != threads.end(); ++it) + it->join(); + + return return_value; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/TestPlatformProcessConnect.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/TestPlatformProcessConnect.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/TestPlatformProcessConnect.py @@ -0,0 +1,96 @@ +from __future__ import print_function + +import time + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestPlatformProcessConnect(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + @llgs_test + @no_debug_info_test + @skipIf(remote=False) + @expectedFailureAll(hostoslist=["windows"], triple='.*-android') + def test_platform_process_connect(self): + self.build() + self.init_llgs_test(False) + + working_dir = lldb.remote_platform.GetWorkingDirectory() + src = lldb.SBFileSpec(self.getBuildArtifact("a.out")) + dest = lldb.SBFileSpec(os.path.join(working_dir, "a.out")) + err = lldb.remote_platform.Put(src, dest) + if err.Fail(): + raise RuntimeError( + "Unable copy '%s' to '%s'.\n>>> %s" % + (f, wd, err.GetCString())) + + m = 
re.search("^(.*)://([^:/]*)", configuration.lldb_platform_url) + protocol = m.group(1) + hostname = m.group(2) + unix_protocol = protocol.startswith("unix-") + if unix_protocol: + p = re.search("^(.*)-connect", protocol) + path = lldbutil.join_remote_paths(configuration.lldb_platform_working_dir, + self.getBuildDirBasename(), "platform-%d.sock" % int(time.time())) + listen_url = "%s://%s" % (p.group(1), path) + else: + listen_url = "*:0" + + port_file = "%s/port" % working_dir + commandline_args = [ + "platform", + "--listen", + listen_url, + "--socket-file", + port_file, + "--", + "%s/a.out" % + working_dir, + "foo"] + self.spawnSubprocess( + self.debug_monitor_exe, + commandline_args, + install_remote=False) + self.addTearDownHook(self.cleanupSubprocesses) + + socket_id = lldbutil.wait_for_file_on_target(self, port_file) + + new_debugger = lldb.SBDebugger.Create() + new_debugger.SetAsync(False) + + def del_debugger(new_debugger=new_debugger): + del new_debugger + self.addTearDownHook(del_debugger) + + new_platform = lldb.SBPlatform(lldb.remote_platform.GetName()) + new_debugger.SetSelectedPlatform(new_platform) + new_interpreter = new_debugger.GetCommandInterpreter() + + if unix_protocol: + connect_url = "%s://%s%s" % (protocol, hostname, socket_id) + else: + connect_url = "%s://%s:%s" % (protocol, hostname, socket_id) + + command = "platform connect %s" % (connect_url) + result = lldb.SBCommandReturnObject() + new_interpreter.HandleCommand(command, result) + self.assertTrue( + result.Succeeded(), + "platform process connect failed: %s" % + result.GetOutput()) + + target = new_debugger.GetSelectedTarget() + process = target.GetProcess() + thread = process.GetThreadAtIndex(0) + + breakpoint = target.BreakpointCreateByName("main") + process.Continue() + + frame = thread.GetFrameAtIndex(0) + self.assertEqual(frame.GetFunction().GetName(), "main") + self.assertEqual(frame.FindVariable("argc").GetValueAsSigned(), 2) + process.Continue() Index: 
lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/main.cpp @@ -0,0 +1,6 @@ +#include + +int main(int argc, char **argv) { + printf("argc: %d\n", argc); + return argv[0][0]; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py @@ -0,0 +1,153 @@ +from __future__ import print_function + + +import gdbremote_testcase +import textwrap +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +def _extract_register_value(reg_info, reg_bank, byte_order, bytes_per_entry=8): + reg_offset = int(reg_info["offset"])*2 + reg_byte_size = int(2 * int(reg_info["bitsize"]) / 8) + # Create slice with the contents of the register. + reg_slice = reg_bank[reg_offset:reg_offset+reg_byte_size] + + reg_value = [] + # Wrap slice according to bytes_per_entry. 
+ for entry in textwrap.wrap(reg_slice, 2 * bytes_per_entry): + # Invert the bytes order if target uses little-endian. + if byte_order == lldb.eByteOrderLittle: + entry = "".join(reversed([entry[i:i+2] for i in range(0, + len(entry),2)])) + reg_value.append("0x" + entry) + + return reg_value + + +class TestGdbRemoteGPacket(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def run_test_g_packet(self): + self.build() + self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + ["read packet: $g#67", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "register_bank"}}], + True) + self.connect_to_debug_monitor() + context = self.expect_gdbremote_sequence() + register_bank = context.get("register_bank") + self.assertTrue(register_bank[0] != 'E') + + self.test_sequence.add_log_lines( + ["read packet: $G" + register_bank + "#00", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "G_reply"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertTrue(context.get("G_reply")[0] != 'E') + + @skipIfOutOfTreeDebugserver + @debugserver_test + @skipIfDarwinEmbedded + def test_g_packet_debugserver(self): + self.init_debugserver_test() + self.run_test_g_packet() + + @skipIf(archs=no_match(["x86_64"])) + def g_returns_correct_data(self, with_suffix): + procs = self.prep_debug_monitor_and_inferior() + + self.add_register_info_collection_packets() + if with_suffix: + self.add_thread_suffix_request_packets() + self.add_threadinfo_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + # Index register info entries by name. + reg_infos = {info['name']: info for info in reg_infos} + + # Gather thread info. 
+ if with_suffix: + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + thread_id = threads[0] + self.assertIsNotNone(thread_id) + else: + thread_id = None + + # Send vCont packet to resume the inferior. + self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", + "capture": {1: "hex_exit_code"}}, + ], + True) + + # Send g packet to retrieve the register bank + if thread_id: + g_request = "read packet: $g;thread:{:x}#00".format(thread_id) + else: + g_request = "read packet: $g#00" + self.test_sequence.add_log_lines( + [g_request, + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "register_bank"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + reg_bank = context.get("register_bank") + self.assertTrue(reg_bank[0] != 'E') + + byte_order = self.get_target_byte_order() + get_reg_value = lambda reg_name : _extract_register_value( + reg_infos[reg_name], reg_bank, byte_order) + + self.assertEqual(['0x0102030405060708'], get_reg_value('r8')) + self.assertEqual(['0x1112131415161718'], get_reg_value('r9')) + self.assertEqual(['0x2122232425262728'], get_reg_value('r10')) + self.assertEqual(['0x3132333435363738'], get_reg_value('r11')) + self.assertEqual(['0x4142434445464748'], get_reg_value('r12')) + self.assertEqual(['0x5152535455565758'], get_reg_value('r13')) + self.assertEqual(['0x6162636465666768'], get_reg_value('r14')) + self.assertEqual(['0x7172737475767778'], get_reg_value('r15')) + + self.assertEqual( + ['0x020406080a0c0e01', '0x030507090b0d0f00'], get_reg_value('xmm8')) + self.assertEqual( + ['0x121416181a1c1e11', '0x131517191b1d1f10'], get_reg_value('xmm9')) + self.assertEqual( + ['0x222426282a2c2e21', '0x232527292b2d2f20'], get_reg_value('xmm10')) + self.assertEqual( + ['0x323436383a3c3e31', '0x333537393b3d3f30'], get_reg_value('xmm11')) + self.assertEqual( + ['0x424446484a4c4e41', 
'0x434547494b4d4f40'], get_reg_value('xmm12')) + self.assertEqual( + ['0x525456585a5c5e51', '0x535557595b5d5f50'], get_reg_value('xmm13')) + self.assertEqual( + ['0x626466686a6c6e61', '0x636567696b6d6f60'], get_reg_value('xmm14')) + self.assertEqual( + ['0x727476787a7c7e71', '0x737577797b7d7f70'], get_reg_value('xmm15')) + + @llgs_test + def test_g_returns_correct_data_with_suffix_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.g_returns_correct_data(True) + + @llgs_test + def test_g_returns_correct_data_no_suffix_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.g_returns_correct_data(False) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/main.cpp @@ -0,0 +1,54 @@ +#include + +struct alignas(16) xmm_t { + uint64_t a, b; +}; + +int main() { + uint64_t r8 = 0x0102030405060708; + uint64_t r9 = 0x1112131415161718; + uint64_t r10 = 0x2122232425262728; + uint64_t r11 = 0x3132333435363738; + uint64_t r12 = 0x4142434445464748; + uint64_t r13 = 0x5152535455565758; + uint64_t r14 = 0x6162636465666768; + uint64_t r15 = 0x7172737475767778; + + xmm_t xmm8 = {0x020406080A0C0E01, 0x030507090B0D0F00}; + xmm_t xmm9 = {0x121416181A1C1E11, 0x131517191B1D1F10}; + xmm_t xmm10 = {0x222426282A2C2E21, 0x232527292B2D2F20}; + xmm_t xmm11 = {0x323436383A3C3E31, 0x333537393B3D3F30}; + xmm_t xmm12 = {0x424446484A4C4E41, 0x434547494B4D4F40}; + xmm_t xmm13 = {0x525456585A5C5E51, 0x535557595B5D5F50}; + xmm_t xmm14 = {0x626466686A6C6E61, 0x636567696B6D6F60}; + xmm_t xmm15 = {0x727476787A7C7E71, 0x737577797B7D7F70}; + + asm volatile("movq %0, %%r8\n\t" + "movq %1, %%r9\n\t" + "movq %2, %%r10\n\t" + "movq %3, %%r11\n\t" + "movq 
%4, %%r12\n\t" + "movq %5, %%r13\n\t" + "movq %6, %%r14\n\t" + "movq %7, %%r15\n\t" + "\n\t" + "movaps %8, %%xmm8\n\t" + "movaps %9, %%xmm9\n\t" + "movaps %10, %%xmm10\n\t" + "movaps %11, %%xmm11\n\t" + "movaps %12, %%xmm12\n\t" + "movaps %13, %%xmm13\n\t" + "movaps %14, %%xmm14\n\t" + "movaps %15, %%xmm15\n\t" + "\n\t" + "int3" + : + : "g"(r8), "g"(r9), "g"(r10), "g"(r11), "g"(r12), "g"(r13), + "g"(r14), "g"(r15), "m"(xmm8), "m"(xmm9), "m"(xmm10), + "m"(xmm11), "m"(xmm12), "m"(xmm13), "m"(xmm14), "m"(xmm15) + : "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", + "%xmm14", "%xmm15"); + + return 0; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py @@ -0,0 +1,115 @@ +# This test makes sure that lldb-server supports and properly handles +# QPassSignals GDB protocol package. 
from __future__ import print_function

import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestGdbRemote_QPassSignals(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Tests for the QPassSignals packet: signals listed there are delivered
    straight to the inferior without stopping the debugger."""

    mydir = TestBase.compute_mydir(__file__)

    def expect_signal(self, expected_signo):
        """Continue the inferior and assert it stops with signal
        *expected_signo* (taken from the $T stop-reply's hex signal code)."""
        self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
                                          {"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
                                           "capture": {1: "hex_exit_code"}},
                                          ],
                                         True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        hex_exit_code = context.get("hex_exit_code")
        self.assertIsNotNone(hex_exit_code)
        self.assertEqual(int(hex_exit_code, 16), expected_signo)

    def expect_exit_code(self, exit_code):
        """Continue the inferior and assert it exits (a $W packet) with
        *exit_code*."""
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             "send packet: $W{0:02x}#00".format(exit_code)],
            True)
        self.expect_gdbremote_sequence()

    def ignore_signals(self, signals):
        """Send QPassSignals listing *signals* (by name) as pass-through and
        expect an OK reply."""
        def signal_name_to_hex(signame):
            return format(lldbutil.get_signal_number(signame), 'x')
        signals_str = ";".join(map(signal_name_to_hex, signals))

        # NOTE(review): the space before '#00' is sent as part of the packet
        # payload — confirm the stub tolerates/expects it.
        self.test_sequence.add_log_lines(["read packet: $QPassSignals:"
                                          + signals_str + " #00",
                                          "send packet: $OK#00"],
                                         True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_q_pass_signals(self):
        """Ignored signals must not stop the inferior; all others must.
        The inferior's exit code counts the signals it actually received."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV",
                           "SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
        signals_to_ignore = ["SIGUSR1", "SIGUSR2"]
        self.ignore_signals(signals_to_ignore)
        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
        # The inferior's handler counted only the pass-through signals.
        self.expect_exit_code(len(signals_to_ignore))

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_change_signals_at_runtime(self):
        """QPassSignals may be re-sent mid-session; signals raised after the
        update (here: after SIGALRM) follow the new pass-through list."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
                           "SIGALRM", "SIGHUP"]
        signals_to_ignore = ["SIGFPE", "SIGBUS", "SIGINT"]

        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
            if signal_name == "SIGALRM":
                # Switch the ignore list while the inferior is stopped.
                self.ignore_signals(signals_to_ignore)
        self.expect_exit_code(len(signals_to_ignore))

    @llgs_test
    def test_default_signals_behavior(self):
        """Without QPassSignals every raised signal stops the inferior, and
        none are counted by the inferior's handler (exit code 0)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
                           "SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
        self.expect_exit_code(0)


    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_support_q_pass_signals(self):
        """lldb-server must advertise QPassSignals+ in its qSupported reply."""
        self.init_llgs_test()
        self.build()

        # Start up the stub and start/prep the inferior.
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        self.add_qSupported_packets()

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Retrieve the qSupported features and check QPassSignals+
        supported_dict = self.parse_qSupported_response(context)
        self.assertEqual(supported_dict["QPassSignals"], "+")
lldbsuite.support import seven + + +def _handle_output_packet_string(packet_contents): + if (not packet_contents) or (len(packet_contents) < 1): + return None + elif packet_contents[0] != "O": + return None + elif packet_contents == "OK": + return None + else: + return seven.unhexlify(packet_contents[1:]) + + +def _dump_queue(the_queue): + while not the_queue.empty(): + print(codecs.encode(the_queue.get(True), "string_escape")) + print("\n") + + +class PumpQueues(object): + + def __init__(self): + self._output_queue = queue.Queue() + self._packet_queue = queue.Queue() + + def output_queue(self): + return self._output_queue + + def packet_queue(self): + return self._packet_queue + + def verify_queues_empty(self): + # Warn if there is any content left in any of the queues. + # That would represent unmatched packets. + if not self.output_queue().empty(): + print("warning: output queue entries still exist:") + _dump_queue(self.output_queue()) + print("from here:") + traceback.print_stack() + + if not self.packet_queue().empty(): + print("warning: packet queue entries still exist:") + _dump_queue(self.packet_queue()) + print("from here:") + traceback.print_stack() + + +class SocketPacketPump(object): + """A threaded packet reader that partitions packets into two streams. + + All incoming $O packet content is accumulated with the current accumulation + state put into the OutputQueue. + + All other incoming packets are placed in the packet queue. + + A select thread can be started and stopped, and runs to place packet + content into the two queues. 
+ """ + + _GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}') + + def __init__(self, pump_socket, pump_queues, logger=None): + if not pump_socket: + raise Exception("pump_socket cannot be None") + + self._thread = None + self._stop_thread = False + self._socket = pump_socket + self._logger = logger + self._receive_buffer = "" + self._accumulated_output = "" + self._pump_queues = pump_queues + + def __enter__(self): + """Support the python 'with' statement. + + Start the pump thread.""" + self.start_pump_thread() + return self + + def __exit__(self, exit_type, value, the_traceback): + """Support the python 'with' statement. + + Shut down the pump thread.""" + self.stop_pump_thread() + + def start_pump_thread(self): + if self._thread: + raise Exception("pump thread is already running") + self._stop_thread = False + self._thread = threading.Thread(target=self._run_method) + self._thread.start() + + def stop_pump_thread(self): + self._stop_thread = True + if self._thread: + self._thread.join() + + def _process_new_bytes(self, new_bytes): + if not new_bytes: + return + if len(new_bytes) < 1: + return + + # Add new bytes to our accumulated unprocessed packet bytes. + self._receive_buffer += new_bytes + + # Parse fully-formed packets into individual packets. + has_more = len(self._receive_buffer) > 0 + while has_more: + if len(self._receive_buffer) <= 0: + has_more = False + # handle '+' ack + elif self._receive_buffer[0] == "+": + self._pump_queues.packet_queue().put("+") + self._receive_buffer = self._receive_buffer[1:] + if self._logger: + self._logger.debug( + "parsed packet from stub: +\n" + + "new receive_buffer: {}".format( + self._receive_buffer)) + else: + packet_match = self._GDB_REMOTE_PACKET_REGEX.match( + self._receive_buffer) + if packet_match: + # Our receive buffer matches a packet at the + # start of the receive buffer. 
+ new_output_content = _handle_output_packet_string( + packet_match.group(1)) + if new_output_content: + # This was an $O packet with new content. + self._accumulated_output += new_output_content + self._pump_queues.output_queue().put(self._accumulated_output) + else: + # Any packet other than $O. + self._pump_queues.packet_queue().put(packet_match.group(0)) + + # Remove the parsed packet from the receive + # buffer. + self._receive_buffer = self._receive_buffer[ + len(packet_match.group(0)):] + if self._logger: + self._logger.debug( + "parsed packet from stub: " + + packet_match.group(0)) + self._logger.debug( + "new receive_buffer: " + + self._receive_buffer) + else: + # We don't have enough in the receive bufferto make a full + # packet. Stop trying until we read more. + has_more = False + + def _run_method(self): + self._receive_buffer = "" + self._accumulated_output = "" + + if self._logger: + self._logger.info("socket pump starting") + + # Keep looping around until we're asked to stop the thread. + while not self._stop_thread: + can_read, _, _ = select.select([self._socket], [], [], 0) + if can_read and self._socket in can_read: + try: + new_bytes = seven.bitcast_to_string(self._socket.recv(4096)) + if self._logger and new_bytes and len(new_bytes) > 0: + self._logger.debug( + "pump received bytes: {}".format(new_bytes)) + except: + # Likely a closed socket. Done with the pump thread. 
+ if self._logger: + self._logger.debug( + "socket read failed, stopping pump read thread\n" + + traceback.format_exc(3)) + break + self._process_new_bytes(new_bytes) + + if self._logger: + self._logger.info("socket pump exiting") + + def get_accumulated_output(self): + return self._accumulated_output + + def get_receive_buffer(self): + return self._receive_buffer Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/test/test_lldbgdbserverutils.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/test/test_lldbgdbserverutils.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/test/test_lldbgdbserverutils.py @@ -0,0 +1,65 @@ +from __future__ import print_function + + +import unittest2 +import os.path +import re +import sys + +from lldbgdbserverutils import * + + +class TestLldbGdbServerUtils(unittest2.TestCase): + + def test_entry_exact_payload_match(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + entry.assert_match(self, "$OK#9a") + + def test_entry_exact_payload_match_ignores_checksum(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + entry.assert_match(self, "$OK#00") + + def test_entry_creates_context(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + context = entry.assert_match(self, "$OK#9a") + self.assertIsNotNone(context) + + def test_entry_regex_matches(self): + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), + capture={ + 1: "thread_id"}) + context = entry.assert_match(self, "$QC980#00") + + def test_entry_regex_saves_match(self): + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), + capture={ + 1: "thread_id"}) + context = entry.assert_match(self, "$QC980#00") + self.assertEqual(context["thread_id"], "980") + + def 
test_entry_regex_expect_captures_success(self): + context = {"thread_id": "980"} + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), + expect_captures={ + 2: "thread_id"}) + entry.assert_match(self, "$T11thread:980;", context=context) + + def test_entry_regex_expect_captures_raises_on_fail(self): + context = {"thread_id": "980"} + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), + expect_captures={ + 2: "thread_id"}) + try: + entry.assert_match(self, "$T11thread:970;", context=context) + self.fail() + except AssertionError: + # okay + return None Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/Makefile @@ -0,0 +1,6 @@ +LEVEL = ../../../make + +ENABLE_THREADS := YES +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/TestGdbRemoteThreadName.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/TestGdbRemoteThreadName.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/TestGdbRemoteThreadName.py @@ -0,0 +1,41 @@ +from __future__ import print_function + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteThreadName(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def run_and_check_name(self, expected_name): + self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", + {"direction": "send", + "regex": + 
r"^\$T([0-9a-fA-F]{2})([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "signal", + 2: "key_vals_text"}}, + ], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + sigint = lldbutil.get_signal_number("SIGINT") + self.assertEqual(sigint, int(context.get("signal"), 16)) + kv_dict = self.parse_key_val_dict(context.get("key_vals_text")) + self.assertEqual(expected_name, kv_dict.get("name")) + + @llgs_test + def test(self): + """ Make sure lldb-server can retrieve inferior thread name""" + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + procs = self.prep_debug_monitor_and_inferior() + + self.run_and_check_name("hello world") + self.run_and_check_name("goodbye world") Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/main.cpp @@ -0,0 +1,22 @@ +#include +#include + +void set_thread_name(const char *name) { +#if defined(__APPLE__) + ::pthread_setname_np(name); +#elif defined(__FreeBSD__) + ::pthread_set_name_np(::pthread_self(), name); +#elif defined(__linux__) + ::pthread_setname_np(::pthread_self(), name); +#elif defined(__NetBSD__) + ::pthread_setname_np(::pthread_self(), "%s", name); +#endif +} + +int main() { + set_thread_name("hello world"); + raise(SIGINT); + set_thread_name("goodbye world"); + raise(SIGINT); + return 0; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/.categories =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/.categories +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/.categories @@ -0,0 +1 @@ +lldb-vscode Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/Makefile 
=================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +C_SOURCES := main.c + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/TestVSCode_attach.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/TestVSCode_attach.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/TestVSCode_attach.py @@ -0,0 +1,193 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os +import shutil +import subprocess +import tempfile +import threading +import time + + +def spawn_and_wait(program, delay): + if delay: + time.sleep(delay) + process = subprocess.Popen([program], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + process.wait() + + +class TestVSCode_attach(lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def set_and_hit_breakpoint(self, continueToExit=True): + source = 'main.c' + breakpoint1_line = line_number(source, '// breakpoint 1') + lines = [breakpoint1_line] + # Set breakoint in the thread function so we can step the threads + breakpoint_ids = self.set_source_breakpoints(source, lines) + self.assertEqual(len(breakpoint_ids), len(lines), + "expect correct number of breakpoints") + self.continue_to_breakpoints(breakpoint_ids) + if continueToExit: + self.continue_to_exit() + + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + 
@skipIfNetBSD # Hangs on NetBSD as well + @no_debug_info_test + def test_by_pid(self): + ''' + Tests attaching to a process by process ID. + ''' + self.build_and_create_debug_adaptor() + program = self.getBuildArtifact("a.out") + self.process = subprocess.Popen([program], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + self.attach(pid=self.process.pid) + self.set_and_hit_breakpoint(continueToExit=True) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @skipIfNetBSD # Hangs on NetBSD as well + @no_debug_info_test + def test_by_name(self): + ''' + Tests attaching to a process by process name. + ''' + self.build_and_create_debug_adaptor() + orig_program = self.getBuildArtifact("a.out") + # Since we are going to attach by process name, we need a unique + # process name that has minimal chance to match a process that is + # already running. To do this we use tempfile.mktemp() to give us a + # full path to a location where we can copy our executable. We then + # run this copy to ensure we don't get the error "more that one + # process matches 'a.out'". + program = tempfile.mktemp() + shutil.copyfile(orig_program, program) + shutil.copymode(orig_program, program) + + def cleanup(): + if os.path.exists(program): + os.unlink(program) + # Execute the cleanup function during test case tear down. + self.addTearDownHook(cleanup) + + self.process = subprocess.Popen([program], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + # Wait for a bit to ensure the process is launched, but not for so long + # that the process has already finished by the time we attach. 
+ time.sleep(3) + self.attach(program=program) + self.set_and_hit_breakpoint(continueToExit=True) + + @skipUnlessDarwin + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @skipIfNetBSD # Hangs on NetBSD as well + @no_debug_info_test + def test_by_name_waitFor(self): + ''' + Tests attaching to a process by process name and waiting for the + next instance of a process to be launched, ingoring all current + ones. + ''' + self.build_and_create_debug_adaptor() + program = self.getBuildArtifact("a.out") + self.spawn_thread = threading.Thread(target=spawn_and_wait, + args=(program, 1.0,)) + self.spawn_thread.start() + self.attach(program=program, waitFor=True) + self.set_and_hit_breakpoint(continueToExit=True) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @skipIfNetBSD # Hangs on NetBSD as well + @no_debug_info_test + def test_commands(self): + ''' + Tests the "initCommands", "preRunCommands", "stopCommands", + "exitCommands", and "attachCommands" that can be passed during + attach. + + "initCommands" are a list of LLDB commands that get executed + before the targt is created. + "preRunCommands" are a list of LLDB commands that get executed + after the target has been created and before the launch. + "stopCommands" are a list of LLDB commands that get executed each + time the program stops. + "exitCommands" are a list of LLDB commands that get executed when + the process exits + "attachCommands" are a list of LLDB commands that get executed and + must have a valid process in the selected target in LLDB after + they are done executing. This allows custom commands to create any + kind of debug session. 
+ ''' + self.build_and_create_debug_adaptor() + program = self.getBuildArtifact("a.out") + # Here we just create a target and launch the process as a way to test + # if we are able to use attach commands to create any kind of a target + # and use it for debugging + attachCommands = [ + 'target create -d "%s"' % (program), + 'process launch -- arg1' + ] + initCommands = ['target list', 'platform list'] + preRunCommands = ['image list a.out', 'image dump sections a.out'] + stopCommands = ['frame variable', 'bt'] + exitCommands = ['expr 2+3', 'expr 3+4'] + self.attach(program=program, + attachCommands=attachCommands, + initCommands=initCommands, + preRunCommands=preRunCommands, + stopCommands=stopCommands, + exitCommands=exitCommands) + + # Get output from the console. This should contain both the + # "initCommands" and the "preRunCommands". + output = self.get_console() + # Verify all "initCommands" were found in console output + self.verify_commands('initCommands', output, initCommands) + # Verify all "preRunCommands" were found in console output + self.verify_commands('preRunCommands', output, preRunCommands) + + functions = ['main'] + breakpoint_ids = self.set_function_breakpoints(functions) + self.assertTrue(len(breakpoint_ids) == len(functions), + "expect one breakpoint") + self.continue_to_breakpoints(breakpoint_ids) + output = self.get_console(timeout=1.0) + self.verify_commands('stopCommands', output, stopCommands) + + # Continue after launch and hit the "pause()" call and stop the target. + # Get output from the console. This should contain both the + # "stopCommands" that were run after we stop. + self.vscode.request_continue() + time.sleep(0.5) + self.vscode.request_pause() + self.vscode.wait_for_stopped() + output = self.get_console(timeout=1.0) + self.verify_commands('stopCommands', output, stopCommands) + + # Continue until the program exits + self.continue_to_exit() + # Get output from the console. 
This should contain both the + # "exitCommands" that were run after the second breakpoint was hit + output = self.get_console(timeout=1.0) + self.verify_commands('exitCommands', output, exitCommands) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/main.c =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/main.c +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/attach/main.c @@ -0,0 +1,11 @@ +#include +#include + +int main(int argc, char const *argv[]) +{ + lldb_enable_attach(); + + printf("pid = %i\n", getpid()); + sleep(10); + return 0; // breakpoint 1 +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setBreakpoints.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setBreakpoints.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setBreakpoints.py @@ -0,0 +1,211 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import pprint +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os + + +class TestVSCode_setBreakpoints(lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfWindows + @skipIfDarwin # Skip this test for now 
until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_set_and_clear(self): + '''Tests setting and clearing source file and line breakpoints. + This packet is a bit tricky on the debug adaptor side since there + is no "clearBreakpoints" packet. Source file and line breakpoints + are set by sending a "setBreakpoints" packet with a source file + specified and zero or more source lines. If breakpoints have been + set in the source file before, any exising breakpoints must remain + set, and any new breakpoints must be created, and any breakpoints + that were in previous requests and are not in the current request + must be removed. This function tests this setting and clearing + and makes sure things happen correctly. It doesn't test hitting + breakpoints and the functionality of each breakpoint, like + 'conditions' and 'hitCondition' settings.''' + source_basename = 'main.cpp' + source_path = os.path.join(os.getcwd(), source_basename) + first_line = line_number('main.cpp', 'break 12') + second_line = line_number('main.cpp', 'break 13') + third_line = line_number('main.cpp', 'break 14') + lines = [first_line, second_line, third_line] + + # Visual Studio Code Debug Adaptors have no way to specify the file + # without launching or attaching to a process, so we must start a + # process in order to be able to set breakpoints. 
+ program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + + # Set 3 breakoints and verify that they got set correctly + response = self.vscode.request_setBreakpoints(source_path, lines) + line_to_id = {} + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + for breakpoint in breakpoints: + line = breakpoint['line'] + # Store the "id" of the breakpoint that was set for later + line_to_id[line] = breakpoint['id'] + self.assertTrue(line in lines, "line expected in lines array") + self.assertTrue(breakpoint['verified'], + "expect breakpoint verified") + + # There is no breakpoint delete packet, clients just send another + # setBreakpoints packet with the same source file with fewer lines. + # Below we remove the second line entry and call the setBreakpoints + # function again. We want to verify that any breakpoints that were set + # before still have the same "id". This means we didn't clear the + # breakpoint and set it again at the same location. We also need to + # verify that the second line location was actually removed. + lines.remove(second_line) + # Set 2 breakoints and verify that the previous breakoints that were + # set above are still set. 
+ response = self.vscode.request_setBreakpoints(source_path, lines) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + for breakpoint in breakpoints: + line = breakpoint['line'] + # Verify the same breakpoints are still set within LLDB by + # making sure the breakpoint ID didn't change + self.assertTrue(line_to_id[line] == breakpoint['id'], + "verify previous breakpoints stayed the same") + self.assertTrue(line in lines, "line expected in lines array") + self.assertTrue(breakpoint['verified'], + "expect breakpoint still verified") + + # Now get the full list of breakpoints set in the target and verify + # we have only 2 breakpoints set. The response above could have told + # us about 2 breakpoints, but we want to make sure we don't have the + # third one still set in the target + response = self.vscode.request_testGetTargetBreakpoints() + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + for breakpoint in breakpoints: + line = breakpoint['line'] + # Verify the same breakpoints are still set within LLDB by + # making sure the breakpoint ID didn't change + self.assertTrue(line_to_id[line] == breakpoint['id'], + "verify previous breakpoints stayed the same") + self.assertTrue(line in lines, "line expected in lines array") + self.assertTrue(breakpoint['verified'], + "expect breakpoint still verified") + + # Now clear all breakpoints for the source file by passing down an + # empty lines array + lines = [] + response = self.vscode.request_setBreakpoints(source_path, lines) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + + # Verify with the target that all breakpoints have been cleared + response = 
self.vscode.request_testGetTargetBreakpoints() + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + + # Now set a breakpoint again in the same source file and verify it + # was added. + lines = [second_line] + response = self.vscode.request_setBreakpoints(source_path, lines) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + for breakpoint in breakpoints: + line = breakpoint['line'] + self.assertTrue(line in lines, "line expected in lines array") + self.assertTrue(breakpoint['verified'], + "expect breakpoint still verified") + + # Now get the full list of breakpoints set in the target and verify + # we have only 2 breakpoints set. The response above could have told + # us about 2 breakpoints, but we want to make sure we don't have the + # third one still set in the target + response = self.vscode.request_testGetTargetBreakpoints() + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(lines), + "expect %u source breakpoints" % (len(lines))) + for breakpoint in breakpoints: + line = breakpoint['line'] + self.assertTrue(line in lines, "line expected in lines array") + self.assertTrue(breakpoint['verified'], + "expect breakpoint still verified") + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_functionality(self): + '''Tests hitting breakpoints and the functionality of a single + breakpoint, like 'conditions' and 'hitCondition' settings.''' + source_basename = 'main.cpp' + source_path = os.path.join(os.getcwd(), source_basename) + loop_line = line_number('main.cpp', '// break loop') + + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + # Set a breakpoint at the loop 
line with no condition and no + # hitCondition + breakpoint_ids = self.set_source_breakpoints(source_path, [loop_line]) + self.assertTrue(len(breakpoint_ids) == 1, "expect one breakpoint") + self.vscode.request_continue() + + # Verify we hit the breakpoint we just set + self.verify_breakpoint_hit(breakpoint_ids) + + # Make sure i is zero at first breakpoint + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 0, 'i != 0 after hitting breakpoint') + + # Update the condition on our breakpoint + new_breakpoint_ids = self.set_source_breakpoints(source_path, + [loop_line], + condition="i==4") + self.assertTrue(breakpoint_ids == new_breakpoint_ids, + "existing breakpoint should have its condition " + "updated") + + self.continue_to_breakpoints(breakpoint_ids) + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 4, + 'i != 4 showing conditional works') + + new_breakpoint_ids = self.set_source_breakpoints(source_path, + [loop_line], + hitCondition="2") + + self.assertTrue(breakpoint_ids == new_breakpoint_ids, + "existing breakpoint should have its condition " + "updated") + + # Continue with a hitContidtion of 2 and expect it to skip 1 value + self.continue_to_breakpoints(breakpoint_ids) + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 6, + 'i != 6 showing hitCondition works') + + # continue after hitting our hitCondition and make sure it only goes + # up by 1 + self.continue_to_breakpoints(breakpoint_ids) + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 7, + 'i != 7 showing post hitCondition hits every time') Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setExceptionBreakpoints.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setExceptionBreakpoints.py +++ 
lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setExceptionBreakpoints.py @@ -0,0 +1,52 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import pprint +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os + + +class TestVSCode_setExceptionBreakpoints( + lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @expectedFailureNetBSD + @no_debug_info_test + def test_functionality(self): + '''Tests setting and clearing exception breakpoints. + This packet is a bit tricky on the debug adaptor side since there + is no "clear exception breakpoints" packet. Exception breakpoints + are set by sending a "setExceptionBreakpoints" packet with zero or + more exception filters. If exception breakpoints have been set + before, any exising breakpoints must remain set, and any new + breakpoints must be created, and any breakpoints that were in + previous requests and are not in the current request must be + removed. This exception tests this setting and clearing and makes + sure things happen correctly. It doesn't test hitting breakpoints + and the functionality of each breakpoint, like 'conditions' and + x'hitCondition' settings. + ''' + # Visual Studio Code Debug Adaptors have no way to specify the file + # without launching or attaching to a process, so we must start a + # process in order to be able to set breakpoints. 
+ program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + + filters = ['cpp_throw', 'cpp_catch'] + response = self.vscode.request_setExceptionBreakpoints(filters) + if response: + self.assertTrue(response['success']) + + self.continue_to_exception_breakpoint('C++ Throw') + self.continue_to_exception_breakpoint('C++ Catch') Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setFunctionBreakpoints.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setFunctionBreakpoints.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/TestVSCode_setFunctionBreakpoints.py @@ -0,0 +1,166 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import pprint +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os + + +class TestVSCode_setFunctionBreakpoints( + lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_set_and_clear(self): + '''Tests setting and clearing function breakpoints. + This packet is a bit tricky on the debug adaptor side since there + is no "clearFunction Breakpoints" packet. Function breakpoints + are set by sending a "setFunctionBreakpoints" packet with zero or + more function names. If function breakpoints have been set before, + any exising breakpoints must remain set, and any new breakpoints + must be created, and any breakpoints that were in previous requests + and are not in the current request must be removed. This function + tests this setting and clearing and makes sure things happen + correctly. 
It doesn't test hitting breakpoints and the functionality + of each breakpoint, like 'conditions' and 'hitCondition' settings. + ''' + # Visual Studio Code Debug Adaptors have no way to specify the file + # without launching or attaching to a process, so we must start a + # process in order to be able to set breakpoints. + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + bp_id_12 = None + functions = ['twelve'] + # Set a function breakpoint at 'twelve' + response = self.vscode.request_setFunctionBreakpoints(functions) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(functions), + "expect %u source breakpoints" % (len(functions))) + for breakpoint in breakpoints: + bp_id_12 = breakpoint['id'] + self.assertTrue(breakpoint['verified'], + "expect breakpoint verified") + + # Add an extra name and make sure we have two breakpoints after this + functions.append('thirteen') + response = self.vscode.request_setFunctionBreakpoints(functions) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(functions), + "expect %u source breakpoints" % (len(functions))) + for breakpoint in breakpoints: + self.assertTrue(breakpoint['verified'], + "expect breakpoint verified") + + # There is no breakpoint delete packet, clients just send another + # setFunctionBreakpoints packet with the different function names. 
+ functions.remove('thirteen') + response = self.vscode.request_setFunctionBreakpoints(functions) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(functions), + "expect %u source breakpoints" % (len(functions))) + for breakpoint in breakpoints: + bp_id = breakpoint['id'] + self.assertTrue(bp_id == bp_id_12, + 'verify "twelve" breakpoint ID is same') + self.assertTrue(breakpoint['verified'], + "expect breakpoint still verified") + + # Now get the full list of breakpoints set in the target and verify + # we have only 1 breakpoints set. The response above could have told + # us about 1 breakpoints, but we want to make sure we don't have the + # second one still set in the target + response = self.vscode.request_testGetTargetBreakpoints() + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(functions), + "expect %u source breakpoints" % (len(functions))) + for breakpoint in breakpoints: + bp_id = breakpoint['id'] + self.assertTrue(bp_id == bp_id_12, + 'verify "twelve" breakpoint ID is same') + self.assertTrue(breakpoint['verified'], + "expect breakpoint still verified") + + # Now clear all breakpoints for the source file by passing down an + # empty lines array + functions = [] + response = self.vscode.request_setFunctionBreakpoints(functions) + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(functions), + "expect %u source breakpoints" % (len(functions))) + + # Verify with the target that all breakpoints have been cleared + response = self.vscode.request_testGetTargetBreakpoints() + if response: + breakpoints = response['body']['breakpoints'] + self.assertTrue(len(breakpoints) == len(functions), + "expect %u source breakpoints" % (len(functions))) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def 
test_functionality(self): + '''Tests hitting breakpoints and the functionality of a single + breakpoint, like 'conditions' and 'hitCondition' settings.''' + + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + # Set a breakpoint on "twelve" with no condition and no hitCondition + functions = ['twelve'] + breakpoint_ids = self.set_function_breakpoints(functions) + + self.assertTrue(len(breakpoint_ids) == len(functions), + "expect one breakpoint") + + # Verify we hit the breakpoint we just set + self.continue_to_breakpoints(breakpoint_ids) + + # Make sure i is zero at first breakpoint + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 0, 'i != 0 after hitting breakpoint') + + # Update the condition on our breakpoint + new_breakpoint_ids = self.set_function_breakpoints(functions, + condition="i==4") + self.assertTrue(breakpoint_ids == new_breakpoint_ids, + "existing breakpoint should have its condition " + "updated") + + self.continue_to_breakpoints(breakpoint_ids) + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 4, + 'i != 4 showing conditional works') + new_breakpoint_ids = self.set_function_breakpoints(functions, + hitCondition="2") + + self.assertTrue(breakpoint_ids == new_breakpoint_ids, + "existing breakpoint should have its condition " + "updated") + + # Continue with a hitContidtion of 2 and expect it to skip 1 value + self.continue_to_breakpoints(breakpoint_ids) + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 6, + 'i != 6 showing hitCondition works') + + # continue after hitting our hitCondition and make sure it only goes + # up by 1 + self.continue_to_breakpoints(breakpoint_ids) + i = int(self.vscode.get_local_variable_value('i')) + self.assertTrue(i == 7, + 'i != 7 showing post hitCondition hits every time') Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/main.cpp 
=================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/breakpoint/main.cpp @@ -0,0 +1,27 @@ +#include +#include + +int twelve(int i) { + return 12 + i; // break 12 +} + +int thirteen(int i) { + return 13 + i; // break 13 +} + +namespace a { + int fourteen(int i) { + return 14 + i; // break 14 + } +} +int main(int argc, char const *argv[]) { + for (int i=0; i<10; ++i) { + int x = twelve(i) + thirteen(i) + a::fourteen(i); // break loop + } + try { + throw std::invalid_argument( "throwing exception for testing" ); + } catch (...) { + puts("caught exception..."); + } + return 0; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +C_SOURCES := main.c + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/TestVSCode_launch.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/TestVSCode_launch.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/TestVSCode_launch.py @@ -0,0 +1,345 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import pprint +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os +import time + + +class TestVSCode_launch(lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfWindows + @skipIfDarwin # Skip this test 
for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_default(self): + ''' + Tests the default launch of a simple program. No arguments, + environment, or anything else is specified. + ''' + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + self.continue_to_exit() + # Now get the STDOUT and verify our program argument is correct + output = self.get_stdout() + self.assertTrue(output and len(output) > 0, + "expect program output") + lines = output.splitlines() + self.assertTrue(program in lines[0], + "make sure program path is in first argument") + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_stopOnEntry(self): + ''' + Tests the default launch of a simple program that stops at the + entry point instead of continuing. + ''' + program = self.getBuildArtifact("a.out") + self.build_and_launch(program, stopOnEntry=True) + self.set_function_breakpoints(['main']) + stopped_events = self.continue_to_next_stop() + for stopped_event in stopped_events: + if 'body' in stopped_event: + body = stopped_event['body'] + if 'reason' in body: + reason = body['reason'] + self.assertTrue( + reason != 'breakpoint', + 'verify stop isn\'t "main" breakpoint') + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @expectedFailureNetBSD + @no_debug_info_test + def test_cwd(self): + ''' + Tests the default launch of a simple program with a current working + directory. 
+ ''' + program = self.getBuildArtifact("a.out") + program_parent_dir = os.path.split(os.path.split(program)[0])[0] + self.build_and_launch(program, + cwd=program_parent_dir) + self.continue_to_exit() + # Now get the STDOUT and verify our program argument is correct + output = self.get_stdout() + self.assertTrue(output and len(output) > 0, + "expect program output") + lines = output.splitlines() + found = False + for line in lines: + if line.startswith('cwd = \"'): + quote_path = '"%s"' % (program_parent_dir) + found = True + self.assertTrue(quote_path in line, + "working directory '%s' not in '%s'" % ( + program_parent_dir, line)) + self.assertTrue(found, "verified program working directory") + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @expectedFailureNetBSD + @no_debug_info_test + def test_debuggerRoot(self): + ''' + Tests the "debuggerRoot" will change the working directory of + the lldb-vscode debug adaptor. + ''' + program = self.getBuildArtifact("a.out") + program_parent_dir = os.path.split(os.path.split(program)[0])[0] + commands = ['platform shell echo cwd = $PWD'] + self.build_and_launch(program, + debuggerRoot=program_parent_dir, + initCommands=commands) + output = self.get_console() + self.assertTrue(output and len(output) > 0, + "expect console output") + lines = output.splitlines() + prefix = 'cwd = ' + found = False + for line in lines: + if line.startswith(prefix): + found = True + self.assertTrue(program_parent_dir == line[len(prefix):], + "lldb-vscode working dir '%s' == '%s'" % ( + program_parent_dir, line[6:])) + self.assertTrue(found, "verified lldb-vscode working directory") + self.continue_to_exit() + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_sourcePath(self): + ''' + Tests the "sourcePath" will set the target.source-map. 
+ ''' + program = self.getBuildArtifact("a.out") + program_dir = os.path.split(program)[0] + self.build_and_launch(program, + sourcePath=program_dir) + output = self.get_console() + self.assertTrue(output and len(output) > 0, + "expect console output") + lines = output.splitlines() + prefix = '(lldb) settings set target.source-map "." ' + found = False + for line in lines: + if line.startswith(prefix): + found = True + quoted_path = '"%s"' % (program_dir) + self.assertTrue(quoted_path == line[len(prefix):], + "lldb-vscode working dir %s == %s" % ( + quoted_path, line[6:])) + self.assertTrue(found, 'found "sourcePath" in console output') + self.continue_to_exit() + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_disableSTDIO(self): + ''' + Tests the default launch of a simple program with STDIO disabled. + ''' + program = self.getBuildArtifact("a.out") + self.build_and_launch(program, + disableSTDIO=True) + self.continue_to_exit() + # Now get the STDOUT and verify our program argument is correct + output = self.get_stdout() + self.assertTrue(output is None or len(output) == 0, + "expect no program output") + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @skipIfLinux # shell argument expansion doesn't seem to work on Linux + @expectedFailureNetBSD + @no_debug_info_test + def test_shellExpandArguments_enabled(self): + ''' + Tests the default launch of a simple program with shell expansion + enabled. 
+ ''' + program = self.getBuildArtifact("a.out") + program_dir = os.path.split(program)[0] + glob = os.path.join(program_dir, '*.out') + self.build_and_launch(program, args=[glob], shellExpandArguments=True) + self.continue_to_exit() + # Now get the STDOUT and verify our program argument is correct + output = self.get_stdout() + self.assertTrue(output and len(output) > 0, + "expect no program output") + lines = output.splitlines() + for line in lines: + quote_path = '"%s"' % (program) + if line.startswith("arg[1] ="): + self.assertTrue(quote_path in line, + 'verify "%s" expanded to "%s"' % ( + glob, program)) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_shellExpandArguments_disabled(self): + ''' + Tests the default launch of a simple program with shell expansion + disabled. + ''' + program = self.getBuildArtifact("a.out") + program_dir = os.path.split(program)[0] + glob = os.path.join(program_dir, '*.out') + self.build_and_launch(program, + args=[glob], + shellExpandArguments=False) + self.continue_to_exit() + # Now get the STDOUT and verify our program argument is correct + output = self.get_stdout() + self.assertTrue(output and len(output) > 0, + "expect no program output") + lines = output.splitlines() + for line in lines: + quote_path = '"%s"' % (glob) + if line.startswith("arg[1] ="): + self.assertTrue(quote_path in line, + 'verify "%s" stayed to "%s"' % ( + glob, glob)) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_args(self): + ''' + Tests launch of a simple program with arguments + ''' + program = self.getBuildArtifact("a.out") + args = ["one", "with space", "'with single quotes'", + '"with double quotes"'] + self.build_and_launch(program, + args=args) + self.continue_to_exit() + + # Now get the STDOUT and verify our arguments got passed 
correctly + output = self.get_stdout() + self.assertTrue(output and len(output) > 0, + "expect program output") + lines = output.splitlines() + # Skip the first argument that contains the program name + lines.pop(0) + # Make sure arguments we specified are correct + for (i, arg) in enumerate(args): + quoted_arg = '"%s"' % (arg) + self.assertTrue(quoted_arg in lines[i], + 'arg[%i] "%s" not in "%s"' % (i+1, quoted_arg, lines[i])) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_environment(self): + ''' + Tests launch of a simple program with environment variables + ''' + program = self.getBuildArtifact("a.out") + env = ["NO_VALUE", "WITH_VALUE=BAR", "EMPTY_VALUE=", + "SPACE=Hello World"] + self.build_and_launch(program, + env=env) + self.continue_to_exit() + + # Now get the STDOUT and verify our arguments got passed correctly + output = self.get_stdout() + self.assertTrue(output and len(output) > 0, + "expect program output") + lines = output.splitlines() + # Skip the all arguments so we have only environment vars left + while len(lines) and lines[0].startswith("arg["): + lines.pop(0) + # Make sure each environment variable in "env" is actually set in the + # program environment that was printed to STDOUT + for var in env: + found = False + for program_var in lines: + if var in program_var: + found = True + break + self.assertTrue(found, + '"%s" must exist in program environment (%s)' % ( + var, lines)) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_commands(self): + ''' + Tests the "initCommands", "preRunCommands", "stopCommands" and + "exitCommands" that can be passed during launch. + + "initCommands" are a list of LLDB commands that get executed + before the targt is created. 
+ "preRunCommands" are a list of LLDB commands that get executed + after the target has been created and before the launch. + "stopCommands" are a list of LLDB commands that get executed each + time the program stops. + "exitCommands" are a list of LLDB commands that get executed when + the process exits + ''' + program = self.getBuildArtifact("a.out") + initCommands = ['target list', 'platform list'] + preRunCommands = ['image list a.out', 'image dump sections a.out'] + stopCommands = ['frame variable', 'bt'] + exitCommands = ['expr 2+3', 'expr 3+4'] + self.build_and_launch(program, + initCommands=initCommands, + preRunCommands=preRunCommands, + stopCommands=stopCommands, + exitCommands=exitCommands) + + # Get output from the console. This should contain both the + # "initCommands" and the "preRunCommands". + output = self.get_console() + # Verify all "initCommands" were found in console output + self.verify_commands('initCommands', output, initCommands) + # Verify all "preRunCommands" were found in console output + self.verify_commands('preRunCommands', output, preRunCommands) + + source = 'main.c' + first_line = line_number(source, '// breakpoint 1') + second_line = line_number(source, '// breakpoint 2') + lines = [first_line, second_line] + + # Set 2 breakoints so we can verify that "stopCommands" get run as the + # breakpoints get hit + breakpoint_ids = self.set_source_breakpoints(source, lines) + self.assertTrue(len(breakpoint_ids) == len(lines), + "expect correct number of breakpoints") + + # Continue after launch and hit the first breakpoint. + # Get output from the console. This should contain both the + # "stopCommands" that were run after the first breakpoint was hit + self.continue_to_breakpoints(breakpoint_ids) + output = self.get_console(timeout=1.0) + self.verify_commands('stopCommands', output, stopCommands) + + # Continue again and hit the second breakpoint. + # Get output from the console. 
This should contain both the + # "stopCommands" that were run after the second breakpoint was hit + self.continue_to_breakpoints(breakpoint_ids) + output = self.get_console(timeout=1.0) + self.verify_commands('stopCommands', output, stopCommands) + + # Continue until the program exits + self.continue_to_exit() + # Get output from the console. This should contain both the + # "exitCommands" that were run after the second breakpoint was hit + output = self.get_console(timeout=1.0) + self.verify_commands('exitCommands', output, exitCommands) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/main.c =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/main.c +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/launch/main.c @@ -0,0 +1,15 @@ +#include +#include +#include + +int main(int argc, char const *argv[], char const *envp[]) { + for (int i=0; i> 32 + bp_loc_id = response_id & 0xffffffff + breakpoint_ids.append('%i.%i' % (bp_id, bp_loc_id)) + return breakpoint_ids + + def set_function_breakpoints(self, functions, condition=None, + hitCondition=None): + '''Sets breakpoints by function name given an array of function names + and returns an array of strings containing the breakpoint location + IDs ("1.1", "1.2") for each breakpoint that was set. 
+ ''' + response = self.vscode.request_setFunctionBreakpoints( + functions, condition=condition, hitCondition=hitCondition) + if response is None: + return [] + breakpoints = response['body']['breakpoints'] + breakpoint_ids = [] + for breakpoint in breakpoints: + response_id = breakpoint['id'] + bp_id = response_id >> 32 + bp_loc_id = response_id & 0xffffffff + breakpoint_ids.append('%i.%i' % (bp_id, bp_loc_id)) + return breakpoint_ids + + def verify_breakpoint_hit(self, breakpoint_ids): + '''Wait for the process we are debugging to stop, and verify we hit + any breakpoint location in the "breakpoint_ids" array. + "breakpoint_ids" should be a list of breakpoint location ID strings + (["1.1", "2.1"]). The return value from + self.set_source_breakpoints() can be passed to this function''' + stopped_events = self.vscode.wait_for_stopped() + for stopped_event in stopped_events: + if 'body' in stopped_event: + body = stopped_event['body'] + if 'reason' not in body: + continue + if body['reason'] != 'breakpoint': + continue + if 'description' not in body: + continue + # Description is "breakpoint 1.1", so look for any location id + # ("1.1") in the description field as verification that one of + # the breakpoint locations was hit + description = body['description'] + for breakpoint_id in breakpoint_ids: + if breakpoint_id in description: + return True + return False + + def verify_exception_breakpoint_hit(self, filter_label): + '''Wait for the process we are debugging to stop, and verify the stop + reason is 'exception' and that the description matches + 'filter_label' + ''' + stopped_events = self.vscode.wait_for_stopped() + for stopped_event in stopped_events: + if 'body' in stopped_event: + body = stopped_event['body'] + if 'reason' not in body: + continue + if body['reason'] != 'exception': + continue + if 'description' not in body: + continue + description = body['description'] + if filter_label == description: + return True + return False + + def 
verify_commands(self, flavor, output, commands): + self.assertTrue(output and len(output) > 0, "expect console output") + lines = output.splitlines() + prefix = '(lldb) ' + for cmd in commands: + found = False + for line in lines: + if line.startswith(prefix) and cmd in line: + found = True + break + self.assertTrue(found, + "verify '%s' found in console output for '%s'" % ( + cmd, flavor)) + + def get_dict_value(self, d, key_path): + '''Verify each key in the key_path array is in contained in each + dictionary within "d". Assert if any key isn't in the + corresponding dictionary. This is handy for grabbing values from VS + Code response dictionary like getting + response['body']['stackFrames'] + ''' + value = d + for key in key_path: + if key in value: + value = value[key] + else: + self.assertTrue(key in value, + 'key "%s" from key_path "%s" not in "%s"' % ( + key, key_path, d)) + return value + + def get_stackFrames(self, threadId=None, startFrame=None, levels=None, + dump=False): + response = self.vscode.request_stackTrace(threadId=threadId, + startFrame=startFrame, + levels=levels, + dump=dump) + if response: + return self.get_dict_value(response, ['body', 'stackFrames']) + return None + + def get_source_and_line(self, threadId=None, frameIndex=0): + stackFrames = self.get_stackFrames(threadId=threadId, + startFrame=frameIndex, + levels=1) + if stackFrames is not None: + stackFrame = stackFrames[0] + ['source', 'path'] + if 'source' in stackFrame: + source = stackFrame['source'] + if 'path' in source: + if 'line' in stackFrame: + return (source['path'], stackFrame['line']) + return ('', 0) + + def get_stdout(self, timeout=0.0): + return self.vscode.get_output('stdout', timeout=timeout) + + def get_console(self, timeout=0.0): + return self.vscode.get_output('console', timeout=timeout) + + def get_local_as_int(self, name, threadId=None): + value = self.vscode.get_local_variable_value(name, threadId=threadId) + if value.startswith('0x'): + return int(value, 16) + 
elif value.startswith('0'): + return int(value, 8) + else: + return int(value) + + def set_local(self, name, value, id=None): + '''Set a top level local variable only.''' + return self.vscode.request_setVariable(1, name, str(value), id=id) + + def set_global(self, name, value, id=None): + '''Set a top level global variable only.''' + return self.vscode.request_setVariable(2, name, str(value), id=id) + + def stepIn(self, threadId=None, waitForStop=True): + self.vscode.request_stepIn(threadId=threadId) + if waitForStop: + return self.vscode.wait_for_stopped() + return None + + def stepOver(self, threadId=None, waitForStop=True): + self.vscode.request_next(threadId=threadId) + if waitForStop: + return self.vscode.wait_for_stopped() + return None + + def stepOut(self, threadId=None, waitForStop=True): + self.vscode.request_stepOut(threadId=threadId) + if waitForStop: + return self.vscode.wait_for_stopped() + return None + + def continue_to_next_stop(self): + self.vscode.request_continue() + return self.vscode.wait_for_stopped() + + def continue_to_breakpoints(self, breakpoint_ids): + self.vscode.request_continue() + self.verify_breakpoint_hit(breakpoint_ids) + + def continue_to_exception_breakpoint(self, filter_label): + self.vscode.request_continue() + self.assertTrue(self.verify_exception_breakpoint_hit(filter_label), + 'verify we got "%s"' % (filter_label)) + + def continue_to_exit(self, exitCode=0): + self.vscode.request_continue() + stopped_events = self.vscode.wait_for_stopped() + self.assertTrue(len(stopped_events) == 1, + "expecting single 'exited' event") + self.assertTrue(stopped_events[0]['event'] == 'exited', + 'make sure program ran to completion') + self.assertTrue(stopped_events[0]['body']['exitCode'] == exitCode, + 'exitCode == %i' % (exitCode)) + + def attach(self, program=None, pid=None, waitFor=None, trace=None, + initCommands=None, preRunCommands=None, stopCommands=None, + exitCommands=None, attachCommands=None): + '''Build the default Makefile 
target, create the VSCode debug adaptor, + and attach to the process. + ''' + # Make sure we disconnect and terminate the VSCode debug adaptor even + # if we throw an exception during the test case. + def cleanup(): + self.vscode.request_disconnect(terminateDebuggee=True) + self.vscode.terminate() + + # Execute the cleanup function during test case tear down. + self.addTearDownHook(cleanup) + # Initialize and launch the program + self.vscode.request_initialize() + response = self.vscode.request_attach( + program=program, pid=pid, waitFor=waitFor, trace=trace, + initCommands=initCommands, preRunCommands=preRunCommands, + stopCommands=stopCommands, exitCommands=exitCommands, + attachCommands=attachCommands) + if not (response and response['success']): + self.assertTrue(response['success'], + 'attach failed (%s)' % (response['message'])) + + def build_and_launch(self, program, args=None, cwd=None, env=None, + stopOnEntry=False, disableASLR=True, + disableSTDIO=False, shellExpandArguments=False, + trace=False, initCommands=None, preRunCommands=None, + stopCommands=None, exitCommands=None, + sourcePath=None, debuggerRoot=None): + '''Build the default Makefile target, create the VSCode debug adaptor, + and launch the process. + ''' + self.build_and_create_debug_adaptor() + self.assertTrue(os.path.exists(program), 'executable must exist') + + # Make sure we disconnect and terminate the VSCode debug adaptor even + # if we throw an exception during the test case. + def cleanup(): + self.vscode.request_disconnect(terminateDebuggee=True) + self.vscode.terminate() + + # Execute the cleanup function during test case tear down. 
+ self.addTearDownHook(cleanup) + + # Initialize and launch the program + self.vscode.request_initialize() + response = self.vscode.request_launch( + program, + args=args, + cwd=cwd, + env=env, + stopOnEntry=stopOnEntry, + disableASLR=disableASLR, + disableSTDIO=disableSTDIO, + shellExpandArguments=shellExpandArguments, + trace=trace, + initCommands=initCommands, + preRunCommands=preRunCommands, + stopCommands=stopCommands, + exitCommands=exitCommands, + sourcePath=sourcePath, + debuggerRoot=debuggerRoot) + if not (response and response['success']): + self.assertTrue(response['success'], + 'launch failed (%s)' % (response['message'])) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +C_SOURCES := main.c + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/TestVSCode_stackTrace.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/TestVSCode_stackTrace.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/TestVSCode_stackTrace.py @@ -0,0 +1,160 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os + + +class TestVSCode_stackTrace(lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + name_key_path = ['name'] + source_key_path = ['source', 'path'] + line_key_path = ['line'] + + def verify_stackFrames(self, start_idx, 
stackFrames): + frame_idx = start_idx + for stackFrame in stackFrames: + # Don't care about frame above main + if frame_idx > 20: + return + self.verify_stackFrame(frame_idx, stackFrame) + frame_idx += 1 + + def verify_stackFrame(self, frame_idx, stackFrame): + frame_name = self.get_dict_value(stackFrame, self.name_key_path) + frame_source = self.get_dict_value(stackFrame, self.source_key_path) + frame_line = self.get_dict_value(stackFrame, self.line_key_path) + if frame_idx == 0: + expected_line = self.recurse_end + expected_name = 'recurse' + elif frame_idx < 20: + expected_line = self.recurse_call + expected_name = 'recurse' + else: + expected_line = self.recurse_invocation + expected_name = 'main' + self.assertTrue(frame_name == expected_name, + 'frame #%i name "%s" == "%s"' % ( + frame_idx, frame_name, expected_name)) + self.assertTrue(frame_source == self.source_path, + 'frame #%i source "%s" == "%s"' % ( + frame_idx, frame_source, self.source_path)) + self.assertTrue(frame_line == expected_line, + 'frame #%i line %i == %i' % (frame_idx, frame_line, + expected_line)) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_stackTrace(self): + ''' + Tests the 'stackTrace' packet and all its variants. 
+ ''' + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + source = 'main.c' + self.source_path = os.path.join(os.getcwd(), source) + self.recurse_end = line_number(source, 'recurse end') + self.recurse_call = line_number(source, 'recurse call') + self.recurse_invocation = line_number(source, 'recurse invocation') + + lines = [self.recurse_end] + + # Set breakoint at a point of deepest recuusion + breakpoint_ids = self.set_source_breakpoints(source, lines) + self.assertTrue(len(breakpoint_ids) == len(lines), + "expect correct number of breakpoints") + + self.continue_to_breakpoints(breakpoint_ids) + startFrame = 0 + # Verify we get all stack frames with no arguments + stackFrames = self.get_stackFrames() + frameCount = len(stackFrames) + self.assertTrue(frameCount >= 20, + 'verify we get at least 20 frames for all frames') + self.verify_stackFrames(startFrame, stackFrames) + + # Verify all stack frames by specifying startFrame = 0 and levels not + # specified + stackFrames = self.get_stackFrames(startFrame=startFrame) + self.assertTrue(frameCount == len(stackFrames), + ('verify same number of frames with startFrame=%i') % ( + startFrame)) + self.verify_stackFrames(startFrame, stackFrames) + + # Verify all stack frames by specifying startFrame = 0 and levels = 0 + levels = 0 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(frameCount == len(stackFrames), + ('verify same number of frames with startFrame=%i and' + ' levels=%i') % (startFrame, levels)) + self.verify_stackFrames(startFrame, stackFrames) + + # Get only the first stack frame by sepcifying startFrame = 0 and + # levels = 1 + levels = 1 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(levels == len(stackFrames), + ('verify one frame with startFrame=%i and' + ' levels=%i') % (startFrame, levels)) + self.verify_stackFrames(startFrame, stackFrames) + + # Get only the first 3 stack frames by 
sepcifying startFrame = 0 and + # levels = 3 + levels = 3 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(levels == len(stackFrames), + ('verify %i frames with startFrame=%i and' + ' levels=%i') % (levels, startFrame, levels)) + self.verify_stackFrames(startFrame, stackFrames) + + # Get only the first 15 stack frames by sepcifying startFrame = 5 and + # levels = 16 + startFrame = 5 + levels = 16 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(levels == len(stackFrames), + ('verify %i frames with startFrame=%i and' + ' levels=%i') % (levels, startFrame, levels)) + self.verify_stackFrames(startFrame, stackFrames) + + # Verify we cap things correctly when we ask for too many frames + startFrame = 5 + levels = 1000 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(len(stackFrames) == frameCount - startFrame, + ('verify less than 1000 frames with startFrame=%i and' + ' levels=%i') % (startFrame, levels)) + self.verify_stackFrames(startFrame, stackFrames) + + # Verify level=0 works with non-zerp start frame + startFrame = 5 + levels = 0 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(len(stackFrames) == frameCount - startFrame, + ('verify less than 1000 frames with startFrame=%i and' + ' levels=%i') % (startFrame, levels)) + self.verify_stackFrames(startFrame, stackFrames) + + # Verify we get not frames when startFrame is too high + startFrame = 1000 + levels = 1 + stackFrames = self.get_stackFrames(startFrame=startFrame, + levels=levels) + self.assertTrue(0 == len(stackFrames), + 'verify zero frames with startFrame out of bounds') Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/main.c =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/main.c +++ 
lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/stackTrace/main.c @@ -0,0 +1,13 @@ +#include +#include + +int recurse(int x) { + if (x <= 1) + return 1; // recurse end + return recurse(x-1) + x; // recurse call +} + +int main(int argc, char const *argv[]) { + recurse(20); // recurse invocation + return 0; +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/Makefile @@ -0,0 +1,7 @@ +LEVEL = ../../../make + +ENABLE_THREADS := YES + +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/TestVSCode_step.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/TestVSCode_step.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/TestVSCode_step.py @@ -0,0 +1,79 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os + + +class TestVSCode_step(lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_step(self): + ''' + Tests the stepping in/out/over in threads. 
+ ''' + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + source = 'main.cpp' + # source_path = os.path.join(os.getcwd(), source) + breakpoint1_line = line_number(source, '// breakpoint 1') + lines = [breakpoint1_line] + # Set breakoint in the thread function so we can step the threads + breakpoint_ids = self.set_source_breakpoints(source, lines) + self.assertEqual(len(breakpoint_ids), len(lines), + "expect correct number of breakpoints") + self.continue_to_breakpoints(breakpoint_ids) + threads = self.vscode.get_threads() + for thread in threads: + if 'reason' in thread: + reason = thread['reason'] + if reason == 'breakpoint': + # We have a thread that is stopped at our breakpoint. + # Get the value of "x" and get the source file and line. + # These will help us determine if we are stepping + # correctly. If we step a thread correctly we will verify + # the correct falue for x as it progresses through the + # program. + tid = thread['id'] + x1 = self.get_local_as_int('x', threadId=tid) + (src1, line1) = self.get_source_and_line(threadId=tid) + + # Now step into the "recurse()" function call again and + # verify, using the new value of "x" and the source file + # and line if we stepped correctly + self.stepIn(threadId=tid, waitForStop=True) + x2 = self.get_local_as_int('x', threadId=tid) + (src2, line2) = self.get_source_and_line(threadId=tid) + self.assertEqual(x1, x2 + 1, 'verify step in variable') + self.assertLess(line2, line1, 'verify step in line') + self.assertEqual(src1, src2, 'verify step in source') + + # Now step out and verify + self.stepOut(threadId=tid, waitForStop=True) + x3 = self.get_local_as_int('x', threadId=tid) + (src3, line3) = self.get_source_and_line(threadId=tid) + self.assertEqual(x1, x3, 'verify step out variable') + self.assertGreaterEqual(line3, line1, 'verify step out line') + self.assertEqual(src1, src3, 'verify step in source') + + # Step over and verify + self.stepOver(threadId=tid, waitForStop=True) + x4 = 
self.get_local_as_int('x', threadId=tid) + (src4, line4) = self.get_source_and_line(threadId=tid) + self.assertEqual(x4, x3, 'verify step over variable') + self.assertGreater(line4, line3, 'verify step over line') + self.assertEqual(src1, src4, 'verify step over source') + # only step one thread that is at the breakpoint and stop + break Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/step/main.cpp @@ -0,0 +1,10 @@ +int function(int x) { + if ((x % 2) == 0) + return function(x-1) + x; // breakpoint 1 + else + return x; +} + +int main(int argc, char const *argv[]) { + return function(2); +} Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/Makefile =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/Makefile +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/Makefile @@ -0,0 +1,5 @@ +LEVEL = ../../../make + +CXX_SOURCES := main.cpp + +include $(LEVEL)/Makefile.rules Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/TestVSCode_variables.py =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/TestVSCode_variables.py +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/TestVSCode_variables.py @@ -0,0 +1,225 @@ +""" +Test lldb-vscode setBreakpoints request +""" + +from __future__ import print_function + +import unittest2 +import vscode +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil +import lldbvscode_testcase +import os + + +def make_buffer_verify_dict(start_idx, count, offset=0): + 
verify_dict = {} + for i in range(start_idx, start_idx + count): + verify_dict['[%i]' % (i)] = {'type': 'int', 'value': str(i+offset)} + return verify_dict + + +class TestVSCode_variables(lldbvscode_testcase.VSCodeTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def verify_values(self, verify_dict, actual, varref_dict=None): + if 'equals' in verify_dict: + verify = verify_dict['equals'] + for key in verify: + verify_value = verify[key] + actual_value = actual[key] + self.assertTrue(verify_value == actual_value, + '"%s" keys don\'t match (%s != %s)' % ( + key, actual_value, verify_value)) + if 'startswith' in verify_dict: + verify = verify_dict['startswith'] + for key in verify: + verify_value = verify[key] + actual_value = actual[key] + startswith = actual_value.startswith(verify_value) + self.assertTrue(startswith, + ('"%s" value "%s" doesn\'t start with' + ' "%s")') % ( + key, actual_value, + verify_value)) + hasVariablesReference = 'variablesReference' in actual + varRef = None + if hasVariablesReference: + # Remember variable references in case we want to test further + # by using the evaluate name. 
+ varRef = actual['variablesReference'] + if varRef != 0 and varref_dict is not None: + varref_dict[actual['evaluateName']] = varRef + if ('hasVariablesReference' in verify_dict and + verify_dict['hasVariablesReference']): + self.assertTrue(hasVariablesReference, + "verify variable reference") + if 'children' in verify_dict: + self.assertTrue(hasVariablesReference and varRef is not None and + varRef != 0, + ("children verify values specified for " + "variable without children")) + + response = self.vscode.request_variables(varRef) + self.verify_variables(verify_dict['children'], + response['body']['variables'], + varref_dict) + + def verify_variables(self, verify_dict, variables, varref_dict=None): + for variable in variables: + name = variable['name'] + self.assertTrue(name in verify_dict, + 'variable "%s" in verify dictionary' % (name)) + self.verify_values(verify_dict[name], variable, varref_dict) + + @skipIfWindows + @skipIfDarwin # Skip this test for now until we can figure out why tings aren't working on build bots + @no_debug_info_test + def test_scopes_variables_setVariable_evaluate(self): + ''' + Tests the "scopes", "variables", "setVariable", and "evaluate" + packets. 
+ ''' + program = self.getBuildArtifact("a.out") + self.build_and_launch(program) + source = 'main.cpp' + breakpoint1_line = line_number(source, '// breakpoint 1') + lines = [breakpoint1_line] + # Set breakoint in the thread function so we can step the threads + breakpoint_ids = self.set_source_breakpoints(source, lines) + self.assertTrue(len(breakpoint_ids) == len(lines), + "expect correct number of breakpoints") + self.continue_to_breakpoints(breakpoint_ids) + locals = self.vscode.get_local_variables() + globals = self.vscode.get_global_variables() + buffer_children = make_buffer_verify_dict(0, 32) + verify_locals = { + 'argc': { + 'equals': {'type': 'int', 'value': '1'} + }, + 'argv': { + 'equals': {'type': 'const char **'}, + 'startswith': {'value': '0x'}, + 'hasVariablesReference': True + }, + 'pt': { + 'equals': {'type': 'PointType'}, + 'hasVariablesReference': True, + 'children': { + 'x': {'equals': {'type': 'int', 'value': '11'}}, + 'y': {'equals': {'type': 'int', 'value': '22'}}, + 'buffer': {'children': buffer_children} + } + } + } + verify_globals = { + 's_local': { + 'equals': {'type': 'float', 'value': '2.25'} + }, + '::g_global': { + 'equals': {'type': 'int', 'value': '123'} + }, + 's_global': { + 'equals': {'type': 'int', 'value': '234'} + }, + } + varref_dict = {} + self.verify_variables(verify_locals, locals, varref_dict) + self.verify_variables(verify_globals, globals, varref_dict) + # pprint.PrettyPrinter(indent=4).pprint(varref_dict) + # We need to test the functionality of the "variables" request as it + # has optional parameters like "start" and "count" to limit the number + # of variables that are fetched + varRef = varref_dict['pt.buffer'] + response = self.vscode.request_variables(varRef) + self.verify_variables(buffer_children, response['body']['variables']) + # Verify setting start=0 in the arguments still gets all children + response = self.vscode.request_variables(varRef, start=0) + self.verify_variables(buffer_children, 
response['body']['variables']) + # Verify setting count=0 in the arguments still gets all children. + # If count is zero, it means to get all children. + response = self.vscode.request_variables(varRef, count=0) + self.verify_variables(buffer_children, response['body']['variables']) + # Verify setting count to a value that is too large in the arguments + # still gets all children, and no more + response = self.vscode.request_variables(varRef, count=1000) + self.verify_variables(buffer_children, response['body']['variables']) + # Verify setting the start index and count gets only the children we + # want + response = self.vscode.request_variables(varRef, start=5, count=5) + self.verify_variables(make_buffer_verify_dict(5, 5), + response['body']['variables']) + # Verify setting the start index to a value that is out of range + # results in an empty list + response = self.vscode.request_variables(varRef, start=32, count=1) + self.assertTrue(len(response['body']['variables']) == 0, + 'verify we get no variable back for invalid start') + + # Test evaluate + expressions = { + 'pt.x': { + 'equals': {'result': '11', 'type': 'int'}, + 'hasVariablesReference': False + }, + 'pt.buffer[2]': { + 'equals': {'result': '2', 'type': 'int'}, + 'hasVariablesReference': False + }, + 'pt': { + 'equals': {'type': 'PointType'}, + 'startswith': {'result': 'PointType @ 0x'}, + 'hasVariablesReference': True + }, + 'pt.buffer': { + 'equals': {'type': 'int [32]'}, + 'startswith': {'result': 'int [32] @ 0x'}, + 'hasVariablesReference': True + }, + 'argv': { + 'equals': {'type': 'const char **'}, + 'startswith': {'result': '0x'}, + 'hasVariablesReference': True + }, + 'argv[0]': { + 'equals': {'type': 'const char *'}, + 'startswith': {'result': '0x'}, + 'hasVariablesReference': True + }, + '2+3': { + 'equals': {'result': '5', 'type': 'int'}, + 'hasVariablesReference': False + }, + } + for expression in expressions: + response = self.vscode.request_evaluate(expression) + 
self.verify_values(expressions[expression], response['body']) + + # Test setting variables + self.set_local('argc', 123) + argc = self.get_local_as_int('argc') + self.assertTrue(argc == 123, + 'verify argc was set to 123 (123 != %i)' % (argc)) + + self.set_local('argv', 0x1234) + argv = self.get_local_as_int('argv') + self.assertTrue(argv == 0x1234, + 'verify argv was set to 0x1234 (0x1234 != %#x)' % ( + argv)) + + # Set a variable value whose name is synthetic, like a variable index + # and verify the value by reading it + self.vscode.request_setVariable(varRef, "[0]", 100) + response = self.vscode.request_variables(varRef, start=0, count=1) + self.verify_variables(make_buffer_verify_dict(0, 1, 100), + response['body']['variables']) + + # Set a variable value whose name is a real child value, like "pt.x" + # and verify the value by reading it + varRef = varref_dict['pt'] + self.vscode.request_setVariable(varRef, "x", 111) + response = self.vscode.request_variables(varRef, start=0, count=1) + value = response['body']['variables'][0]['value'] + self.assertTrue(value == '111', + 'verify pt.x got set to 111 (111 != %s)' % (value)) Index: lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/main.cpp =================================================================== --- lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/main.cpp +++ lldb/trunk/packages/Python/lldbsuite/test/tools/lldb-vscode/variables/main.cpp @@ -0,0 +1,18 @@ + +#define BUFFER_SIZE 32 +struct PointType { + int x; + int y; + int buffer[BUFFER_SIZE]; +}; + +int g_global = 123; +static int s_global = 234; + +int main(int argc, char const *argv[]) { + static float s_local = 2.25; + PointType pt = { 11,22, {0}}; + for (int i=0; i= num_per_line: + curr_data_len = num_per_line + else: + curr_data_len = bytes_left + hex_start_idx = i * 2 + hex_end_idx = hex_start_idx + curr_data_len * 2 + curr_hex_str = hex_string[hex_start_idx:hex_end_idx] + # 'curr_hex_str' now contains 
the hex byte string for the + # current line with no spaces between bytes + t = iter(curr_hex_str) + # Print hex bytes separated by space + outfile.write(' '.join(a + b for a, b in zip(t, t))) + # Print two spaces + outfile.write(' ') + # Calculate ASCII string for bytes into 'ascii_str' + ascii_str = '' + for j in range(i, i + curr_data_len): + ch = data[j] + if ch in string.printable and ch not in string.whitespace: + ascii_str += '%c' % (ch) + else: + ascii_str += '.' + # Print ASCII representation and newline + outfile.write(ascii_str) + i = i + curr_data_len + outfile.write('\n') + + +def read_packet(f, verbose=False, trace_file=None): + '''Decode a JSON packet that starts with the content length and is + followed by the JSON bytes from a file 'f'. Returns None on EOF. + ''' + line = f.readline().decode("utf-8") + if len(line) == 0: + return None # EOF. + + # Watch for line that starts with the prefix + prefix = 'Content-Length: ' + if line.startswith(prefix): + # Decode length of JSON bytes + if verbose: + print('content: "%s"' % (line)) + length = int(line[len(prefix):]) + if verbose: + print('length: "%u"' % (length)) + # Skip empty line + line = f.readline() + if verbose: + print('empty: "%s"' % (line)) + # Read JSON bytes + json_str = f.read(length) + if verbose: + print('json: "%s"' % (json_str)) + if trace_file: + trace_file.write('from adaptor:\n%s\n' % (json_str)) + # Decode the JSON bytes into a python dictionary + return json.loads(json_str) + + return None + + +def packet_type_is(packet, packet_type): + return 'type' in packet and packet['type'] == packet_type + + +def read_packet_thread(vs_comm): + done = False + while not done: + packet = read_packet(vs_comm.recv, trace_file=vs_comm.trace_file) + # `packet` will be `None` on EOF. We want to pass it down to + # handle_recv_packet anyway so the main thread can handle unexpected + # termination of lldb-vscode and stop waiting for new packets. 
+ done = not vs_comm.handle_recv_packet(packet) + + +class DebugCommunication(object): + + def __init__(self, recv, send): + self.trace_file = None + self.send = send + self.recv = recv + self.recv_packets = [] + self.recv_condition = threading.Condition() + self.recv_thread = threading.Thread(target=read_packet_thread, + args=(self,)) + self.process_event_body = None + self.exit_status = None + self.initialize_body = None + self.thread_stop_reasons = {} + self.sequence = 1 + self.threads = None + self.recv_thread.start() + self.output_condition = threading.Condition() + self.output = {} + self.configuration_done_sent = False + self.frame_scopes = {} + + @classmethod + def encode_content(cls, s): + return ("Content-Length: %u\r\n\r\n%s" % (len(s), s)).encode("utf-8") + + @classmethod + def validate_response(cls, command, response): + if command['command'] != response['command']: + raise ValueError('command mismatch in response') + if command['seq'] != response['request_seq']: + raise ValueError('seq mismatch in response') + + def get_output(self, category, timeout=0.0, clear=True): + self.output_condition.acquire() + output = None + if category in self.output: + output = self.output[category] + if clear: + del self.output[category] + elif timeout != 0.0: + self.output_condition.wait(timeout) + if category in self.output: + output = self.output[category] + if clear: + del self.output[category] + self.output_condition.release() + return output + + def enqueue_recv_packet(self, packet): + self.recv_condition.acquire() + self.recv_packets.append(packet) + self.recv_condition.notify() + self.recv_condition.release() + + def handle_recv_packet(self, packet): + '''Called by the read thread that is waiting for all incoming packets + to store the incoming packet in "self.recv_packets" in a thread safe + way. This function will then signal the "self.recv_condition" to + indicate a new packet is available. 
Returns True if the caller + should keep calling this function for more packets. + ''' + # If EOF, notify the read thread by enqueing a None. + if not packet: + self.enqueue_recv_packet(None) + return False + + # Check the packet to see if is an event packet + keepGoing = True + packet_type = packet['type'] + if packet_type == 'event': + event = packet['event'] + body = None + if 'body' in packet: + body = packet['body'] + # Handle the event packet and cache information from these packets + # as they come in + if event == 'output': + # Store any output we receive so clients can retrieve it later. + category = body['category'] + output = body['output'] + self.output_condition.acquire() + if category in self.output: + self.output[category] += output + else: + self.output[category] = output + self.output_condition.notify() + self.output_condition.release() + # no need to add 'output' packets to our packets list + return keepGoing + elif event == 'process': + # When a new process is attached or launched, remember the + # details that are available in the body of the event + self.process_event_body = body + elif event == 'stopped': + # Each thread that stops with a reason will send a + # 'stopped' event. We need to remember the thread stop + # reasons since the 'threads' command doesn't return + # that information. 
+ self._process_stopped() + tid = body['threadId'] + self.thread_stop_reasons[tid] = body + elif packet_type == 'response': + if packet['command'] == 'disconnect': + keepGoing = False + self.enqueue_recv_packet(packet) + return keepGoing + + def send_packet(self, command_dict, set_sequence=True): + '''Take the "command_dict" python dictionary and encode it as a JSON + string and send the contents as a packet to the VSCode debug + adaptor''' + # Set the sequence ID for this command automatically + if set_sequence: + command_dict['seq'] = self.sequence + self.sequence += 1 + # Encode our command dictionary as a JSON string + json_str = json.dumps(command_dict, separators=(',', ':')) + if self.trace_file: + self.trace_file.write('to adaptor:\n%s\n' % (json_str)) + length = len(json_str) + if length > 0: + # Send the encoded JSON packet and flush the 'send' file + self.send.write(self.encode_content(json_str)) + self.send.flush() + + def recv_packet(self, filter_type=None, filter_event=None, timeout=None): + '''Get a JSON packet from the VSCode debug adaptor. This function + assumes a thread that reads packets is running and will deliver + any received packets by calling handle_recv_packet(...). 
This + function will wait for the packet to arrive and return it when + it does.''' + while True: + try: + self.recv_condition.acquire() + packet = None + while True: + for (i, curr_packet) in enumerate(self.recv_packets): + if not curr_packet: + raise EOFError + packet_type = curr_packet['type'] + if filter_type is None or packet_type in filter_type: + if (filter_event is None or + (packet_type == 'event' and + curr_packet['event'] in filter_event)): + packet = self.recv_packets.pop(i) + break + if packet: + break + # Sleep until packet is received + len_before = len(self.recv_packets) + self.recv_condition.wait(timeout) + len_after = len(self.recv_packets) + if len_before == len_after: + return None # Timed out + return packet + except EOFError: + return None + finally: + self.recv_condition.release() + + return None + + def send_recv(self, command): + '''Send a command python dictionary as JSON and receive the JSON + response. Validates that the response is the correct sequence and + command in the reply. 
Any events that are received are added to the + events list in this object''' + self.send_packet(command) + done = False + while not done: + response = self.recv_packet(filter_type='response') + if response is None: + desc = 'no response for "%s"' % (command['command']) + raise ValueError(desc) + self.validate_response(command, response) + return response + return None + + def wait_for_event(self, filter=None, timeout=None): + while True: + return self.recv_packet(filter_type='event', filter_event=filter, + timeout=timeout) + return None + + def wait_for_stopped(self, timeout=None): + stopped_events = [] + stopped_event = self.wait_for_event(filter=['stopped', 'exited'], + timeout=timeout) + exited = False + while stopped_event: + stopped_events.append(stopped_event) + # If we exited, then we are done + if stopped_event['event'] == 'exited': + self.exit_status = stopped_event['body']['exitCode'] + exited = True + break + # Otherwise we stopped and there might be one or more 'stopped' + # events for each thread that stopped with a reason, so keep + # checking for more 'stopped' events and return all of them + stopped_event = self.wait_for_event(filter='stopped', timeout=0.25) + if exited: + self.threads = [] + return stopped_events + + def wait_for_exited(self): + event_dict = self.wait_for_event('exited') + if event_dict is None: + raise ValueError("didn't get stopped event") + return event_dict + + def get_initialize_value(self, key): + '''Get a value for the given key if it there is a key/value pair in + the "initialize" request response body. + ''' + if self.initialize_body and key in self.initialize_body: + return self.initialize_body[key] + return None + + def get_threads(self): + if self.threads is None: + self.request_threads() + return self.threads + + def get_thread_id(self, threadIndex=0): + '''Utility function to get the first thread ID in the thread list. + If the thread list is empty, then fetch the threads. 
+ ''' + if self.threads is None: + self.request_threads() + if self.threads and threadIndex < len(self.threads): + return self.threads[threadIndex]['id'] + return None + + def get_stackFrame(self, frameIndex=0, threadId=None): + '''Get a single "StackFrame" object from a "stackTrace" request and + return the "StackFrame as a python dictionary, or None on failure + ''' + if threadId is None: + threadId = self.get_thread_id() + if threadId is None: + print('invalid threadId') + return None + response = self.request_stackTrace(threadId, startFrame=frameIndex, + levels=1) + if response: + return response['body']['stackFrames'][0] + print('invalid response') + return None + + def get_scope_variables(self, scope_name, frameIndex=0, threadId=None): + stackFrame = self.get_stackFrame(frameIndex=frameIndex, + threadId=threadId) + if stackFrame is None: + return [] + frameId = stackFrame['id'] + if frameId in self.frame_scopes: + frame_scopes = self.frame_scopes[frameId] + else: + scopes_response = self.request_scopes(frameId) + frame_scopes = scopes_response['body']['scopes'] + self.frame_scopes[frameId] = frame_scopes + for scope in frame_scopes: + if scope['name'] == scope_name: + varRef = scope['variablesReference'] + variables_response = self.request_variables(varRef) + if variables_response: + if 'body' in variables_response: + body = variables_response['body'] + if 'variables' in body: + vars = body['variables'] + return vars + return [] + + def get_global_variables(self, frameIndex=0, threadId=None): + return self.get_scope_variables('Globals', frameIndex=frameIndex, + threadId=threadId) + + def get_local_variables(self, frameIndex=0, threadId=None): + return self.get_scope_variables('Locals', frameIndex=frameIndex, + threadId=threadId) + + def get_local_variable(self, name, frameIndex=0, threadId=None): + locals = self.get_local_variables(frameIndex=frameIndex, + threadId=threadId) + for local in locals: + if 'name' in local and local['name'] == name: + return local 
+ return None + + def get_local_variable_value(self, name, frameIndex=0, threadId=None): + variable = self.get_local_variable(name, frameIndex=frameIndex, + threadId=threadId) + if variable and 'value' in variable: + return variable['value'] + return None + + def replay_packets(self, replay_file_path): + f = open(replay_file_path, 'r') + mode = 'invalid' + set_sequence = False + command_dict = None + while mode != 'eof': + if mode == 'invalid': + line = f.readline() + if line.startswith('to adapter:'): + mode = 'send' + elif line.startswith('from adapter:'): + mode = 'recv' + elif mode == 'send': + command_dict = read_packet(f) + # Skip the end of line that follows the JSON + f.readline() + if command_dict is None: + raise ValueError('decode packet failed from replay file') + print('Sending:') + pprint.PrettyPrinter(indent=2).pprint(command_dict) + # raw_input('Press ENTER to send:') + self.send_packet(command_dict, set_sequence) + mode = 'invalid' + elif mode == 'recv': + print('Replay response:') + replay_response = read_packet(f) + # Skip the end of line that follows the JSON + f.readline() + pprint.PrettyPrinter(indent=2).pprint(replay_response) + actual_response = self.recv_packet() + if actual_response: + type = actual_response['type'] + print('Actual response:') + if type == 'response': + self.validate_response(command_dict, actual_response) + pprint.PrettyPrinter(indent=2).pprint(actual_response) + else: + print("error: didn't get a valid response") + mode = 'invalid' + + def request_attach(self, program=None, pid=None, waitFor=None, trace=None, + initCommands=None, preRunCommands=None, + stopCommands=None, exitCommands=None, + attachCommands=None): + args_dict = {} + if pid is not None: + args_dict['pid'] = pid + if program is not None: + args_dict['program'] = program + if waitFor is not None: + args_dict['waitFor'] = waitFor + if trace: + args_dict['trace'] = trace + args_dict['initCommands'] = [ + 'settings set symbols.enable-external-lookup false'] + 
if initCommands: + args_dict['initCommands'].extend(initCommands) + if preRunCommands: + args_dict['preRunCommands'] = preRunCommands + if stopCommands: + args_dict['stopCommands'] = stopCommands + if exitCommands: + args_dict['exitCommands'] = exitCommands + if attachCommands: + args_dict['attachCommands'] = attachCommands + command_dict = { + 'command': 'attach', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_configurationDone(self): + command_dict = { + 'command': 'configurationDone', + 'type': 'request', + 'arguments': {} + } + response = self.send_recv(command_dict) + if response: + self.configuration_done_sent = True + return response + + def _process_stopped(self): + self.threads = None + self.frame_scopes = {} + + def request_continue(self, threadId=None): + if self.exit_status is not None: + raise ValueError('request_continue called after process exited') + # If we have launched or attached, then the first continue is done by + # sending the 'configurationDone' request + if not self.configuration_done_sent: + return self.request_configurationDone() + args_dict = {} + if threadId is None: + threadId = self.get_thread_id() + args_dict['threadId'] = threadId + command_dict = { + 'command': 'continue', + 'type': 'request', + 'arguments': args_dict + } + response = self.send_recv(command_dict) + recv_packets = [] + self.recv_condition.acquire() + for event in self.recv_packets: + if event['event'] != 'stopped': + recv_packets.append(event) + self.recv_packets = recv_packets + self.recv_condition.release() + return response + + def request_disconnect(self, terminateDebuggee=None): + args_dict = {} + if terminateDebuggee is not None: + if terminateDebuggee: + args_dict['terminateDebuggee'] = True + else: + args_dict['terminateDebuggee'] = False + command_dict = { + 'command': 'disconnect', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_evaluate(self, 
expression, frameIndex=0, threadId=None): + stackFrame = self.get_stackFrame(frameIndex=frameIndex, + threadId=threadId) + if stackFrame is None: + return [] + args_dict = { + 'expression': expression, + 'frameId': stackFrame['id'], + } + command_dict = { + 'command': 'evaluate', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_initialize(self): + command_dict = { + 'command': 'initialize', + 'type': 'request', + 'arguments': { + 'adapterID': 'lldb-native', + 'clientID': 'vscode', + 'columnsStartAt1': True, + 'linesStartAt1': True, + 'locale': 'en-us', + 'pathFormat': 'path', + 'supportsRunInTerminalRequest': True, + 'supportsVariablePaging': True, + 'supportsVariableType': True + } + } + response = self.send_recv(command_dict) + if response: + if 'body' in response: + self.initialize_body = response['body'] + return response + + def request_launch(self, program, args=None, cwd=None, env=None, + stopOnEntry=False, disableASLR=True, + disableSTDIO=False, shellExpandArguments=False, + trace=False, initCommands=None, preRunCommands=None, + stopCommands=None, exitCommands=None, sourcePath=None, + debuggerRoot=None): + args_dict = { + 'program': program + } + if args: + args_dict['args'] = args + if cwd: + args_dict['cwd'] = cwd + if env: + args_dict['env'] = env + if stopOnEntry: + args_dict['stopOnEntry'] = stopOnEntry + if disableASLR: + args_dict['disableASLR'] = disableASLR + if disableSTDIO: + args_dict['disableSTDIO'] = disableSTDIO + if shellExpandArguments: + args_dict['shellExpandArguments'] = shellExpandArguments + if trace: + args_dict['trace'] = trace + args_dict['initCommands'] = [ + 'settings set symbols.enable-external-lookup false'] + if initCommands: + args_dict['initCommands'].extend(initCommands) + if preRunCommands: + args_dict['preRunCommands'] = preRunCommands + if stopCommands: + args_dict['stopCommands'] = stopCommands + if exitCommands: + args_dict['exitCommands'] = exitCommands + if 
sourcePath: + args_dict['sourcePath'] = sourcePath + if debuggerRoot: + args_dict['debuggerRoot'] = debuggerRoot + command_dict = { + 'command': 'launch', + 'type': 'request', + 'arguments': args_dict + } + response = self.send_recv(command_dict) + + # Wait for a 'process' and 'initialized' event in any order + self.wait_for_event(filter=['process', 'initialized']) + self.wait_for_event(filter=['process', 'initialized']) + return response + + def request_next(self, threadId): + if self.exit_status is not None: + raise ValueError('request_continue called after process exited') + args_dict = {'threadId': threadId} + command_dict = { + 'command': 'next', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_stepIn(self, threadId): + if self.exit_status is not None: + raise ValueError('request_continue called after process exited') + args_dict = {'threadId': threadId} + command_dict = { + 'command': 'stepIn', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_stepOut(self, threadId): + if self.exit_status is not None: + raise ValueError('request_continue called after process exited') + args_dict = {'threadId': threadId} + command_dict = { + 'command': 'stepOut', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_pause(self, threadId=None): + if self.exit_status is not None: + raise ValueError('request_continue called after process exited') + if threadId is None: + threadId = self.get_thread_id() + args_dict = {'threadId': threadId} + command_dict = { + 'command': 'pause', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_scopes(self, frameId): + args_dict = {'frameId': frameId} + command_dict = { + 'command': 'scopes', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_setBreakpoints(self, file_path, 
line_array, condition=None, + hitCondition=None): + (dir, base) = os.path.split(file_path) + breakpoints = [] + for line in line_array: + bp = {'line': line} + if condition is not None: + bp['condition'] = condition + if hitCondition is not None: + bp['hitCondition'] = hitCondition + breakpoints.append(bp) + source_dict = { + 'name': base, + 'path': file_path + } + args_dict = { + 'source': source_dict, + 'breakpoints': breakpoints, + 'lines': '%s' % (line_array), + 'sourceModified': False, + } + command_dict = { + 'command': 'setBreakpoints', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_setExceptionBreakpoints(self, filters): + args_dict = {'filters': filters} + command_dict = { + 'command': 'setExceptionBreakpoints', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_setFunctionBreakpoints(self, names, condition=None, + hitCondition=None): + breakpoints = [] + for name in names: + bp = {'name': name} + if condition is not None: + bp['condition'] = condition + if hitCondition is not None: + bp['hitCondition'] = hitCondition + breakpoints.append(bp) + args_dict = {'breakpoints': breakpoints} + command_dict = { + 'command': 'setFunctionBreakpoints', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_stackTrace(self, threadId=None, startFrame=None, levels=None, + dump=False): + if threadId is None: + threadId = self.get_thread_id() + args_dict = {'threadId': threadId} + if startFrame is not None: + args_dict['startFrame'] = startFrame + if levels is not None: + args_dict['levels'] = levels + command_dict = { + 'command': 'stackTrace', + 'type': 'request', + 'arguments': args_dict + } + response = self.send_recv(command_dict) + if dump: + for (idx, frame) in enumerate(response['body']['stackFrames']): + name = frame['name'] + if 'line' in frame and 'source' in frame: + source = frame['source'] + if 
'sourceReference' not in source: + if 'name' in source: + source_name = source['name'] + line = frame['line'] + print("[%3u] %s @ %s:%u" % (idx, name, source_name, + line)) + continue + print("[%3u] %s" % (idx, name)) + return response + + def request_threads(self): + '''Request a list of all threads and combine any information from any + "stopped" events since those contain more information about why a + thread actually stopped. Returns an array of thread dictionaries + with information about all threads''' + command_dict = { + 'command': 'threads', + 'type': 'request', + 'arguments': {} + } + response = self.send_recv(command_dict) + body = response['body'] + # Fill in "self.threads" correctly so that clients that call + # self.get_threads() or self.get_thread_id(...) can get information + # on threads when the process is stopped. + if 'threads' in body: + self.threads = body['threads'] + for thread in self.threads: + # Copy the thread dictionary so we can add key/value pairs to + # it without affecfting the original info from the "threads" + # command. 
+ tid = thread['id'] + if tid in self.thread_stop_reasons: + thread_stop_info = self.thread_stop_reasons[tid] + copy_keys = ['reason', 'description', 'text'] + for key in copy_keys: + if key in thread_stop_info: + thread[key] = thread_stop_info[key] + else: + self.threads = None + return response + + def request_variables(self, variablesReference, start=None, count=None): + args_dict = {'variablesReference': variablesReference} + if start is not None: + args_dict['start'] = start + if count is not None: + args_dict['count'] = count + command_dict = { + 'command': 'variables', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_setVariable(self, containingVarRef, name, value, id=None): + args_dict = { + 'variablesReference': containingVarRef, + 'name': name, + 'value': str(value) + } + if id is not None: + args_dict['id'] = id + command_dict = { + 'command': 'setVariable', + 'type': 'request', + 'arguments': args_dict + } + return self.send_recv(command_dict) + + def request_testGetTargetBreakpoints(self): + '''A request packet used in the LLDB test suite to get all currently + set breakpoint infos for all breakpoints currently set in the + target. 
+ ''' + command_dict = { + 'command': '_testGetTargetBreakpoints', + 'type': 'request', + 'arguments': {} + } + return self.send_recv(command_dict) + + def terminate(self): + self.send.close() + # self.recv.close() + + +class DebugAdaptor(DebugCommunication): + def __init__(self, executable=None, port=None): + self.process = None + if executable is not None: + self.process = subprocess.Popen([executable], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + DebugCommunication.__init__(self, self.process.stdout, + self.process.stdin) + elif port is not None: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(('127.0.0.1', port)) + DebugCommunication.__init__(self, s.makefile('r'), s.makefile('w')) + + def get_pid(self): + if self.process: + return self.process.pid + return -1 + + def terminate(self): + super(DebugAdaptor, self).terminate() + if self.process is not None: + self.process.terminate() + self.process.wait() + self.process = None + + +def attach_options_specified(options): + if options.pid is not None: + return True + if options.waitFor: + return True + if options.attach: + return True + if options.attachCmds: + return True + return False + + +def run_vscode(dbg, args, options): + dbg.request_initialize() + if attach_options_specified(options): + response = dbg.request_attach(program=options.program, + pid=options.pid, + waitFor=options.waitFor, + attachCommands=options.attachCmds, + initCommands=options.initCmds, + preRunCommands=options.preRunCmds, + stopCommands=options.stopCmds, + exitCommands=options.exitCmds) + else: + response = dbg.request_launch(options.program, + args=args, + env=options.envs, + cwd=options.workingDir, + debuggerRoot=options.debuggerRoot, + sourcePath=options.sourcePath, + initCommands=options.initCmds, + preRunCommands=options.preRunCmds, + stopCommands=options.stopCmds, + exitCommands=options.exitCmds) + + if response['success']: + if options.sourceBreakpoints: + source_to_lines = {} + 
for file_line in options.sourceBreakpoints: + (path, line) = file_line.split(':') + if len(path) == 0 or len(line) == 0: + print('error: invalid source with line "%s"' % + (file_line)) + + else: + if path in source_to_lines: + source_to_lines[path].append(int(line)) + else: + source_to_lines[path] = [int(line)] + for source in source_to_lines: + dbg.request_setBreakpoints(source, source_to_lines[source]) + if options.funcBreakpoints: + dbg.request_setFunctionBreakpoints(options.funcBreakpoints) + dbg.request_configurationDone() + dbg.wait_for_stopped() + else: + if 'message' in response: + print(response['message']) + dbg.request_disconnect(terminateDebuggee=True) + + +def main(): + parser = optparse.OptionParser( + description=('A testing framework for the Visual Studio Code Debug ' + 'Adaptor protocol')) + + parser.add_option( + '--vscode', + type='string', + dest='vscode_path', + help=('The path to the command line program that implements the ' + 'Visual Studio Code Debug Adaptor protocol.'), + default=None) + + parser.add_option( + '--program', + type='string', + dest='program', + help='The path to the program to debug.', + default=None) + + parser.add_option( + '--workingDir', + type='string', + dest='workingDir', + default=None, + help='Set the working directory for the process we launch.') + + parser.add_option( + '--sourcePath', + type='string', + dest='sourcePath', + default=None, + help=('Set the relative source root for any debug info that has ' + 'relative paths in it.')) + + parser.add_option( + '--debuggerRoot', + type='string', + dest='debuggerRoot', + default=None, + help=('Set the working directory for lldb-vscode for any object files ' + 'with relative paths in the Mach-o debug map.')) + + parser.add_option( + '-r', '--replay', + type='string', + dest='replay', + help=('Specify a file containing a packet log to replay with the ' + 'current Visual Studio Code Debug Adaptor executable.'), + default=None) + + parser.add_option( + '-g', '--debug', + 
action='store_true', + dest='debug', + default=False, + help='Pause waiting for a debugger to attach to the debug adaptor') + + parser.add_option( + '--port', + type='int', + dest='port', + help="Attach a socket to a port instead of using STDIN for VSCode", + default=None) + + parser.add_option( + '--pid', + type='int', + dest='pid', + help="The process ID to attach to", + default=None) + + parser.add_option( + '--attach', + action='store_true', + dest='attach', + default=False, + help=('Specify this option to attach to a process by name. The ' + 'process name is the basanme of the executable specified with ' + 'the --program option.')) + + parser.add_option( + '-f', '--function-bp', + type='string', + action='append', + dest='funcBreakpoints', + help=('Specify the name of a function to break at. ' + 'Can be specified more than once.'), + default=[]) + + parser.add_option( + '-s', '--source-bp', + type='string', + action='append', + dest='sourceBreakpoints', + default=[], + help=('Specify source breakpoints to set in the format of ' + ':. ' + 'Can be specified more than once.')) + + parser.add_option( + '--attachCommand', + type='string', + action='append', + dest='attachCmds', + default=[], + help=('Specify a LLDB command that will attach to a process. ' + 'Can be specified more than once.')) + + parser.add_option( + '--initCommand', + type='string', + action='append', + dest='initCmds', + default=[], + help=('Specify a LLDB command that will be executed before the target ' + 'is created. Can be specified more than once.')) + + parser.add_option( + '--preRunCommand', + type='string', + action='append', + dest='preRunCmds', + default=[], + help=('Specify a LLDB command that will be executed after the target ' + 'has been created. Can be specified more than once.')) + + parser.add_option( + '--stopCommand', + type='string', + action='append', + dest='stopCmds', + default=[], + help=('Specify a LLDB command that will be executed each time the' + 'process stops. 
Can be specified more than once.')) + + parser.add_option( + '--exitCommand', + type='string', + action='append', + dest='exitCmds', + default=[], + help=('Specify a LLDB command that will be executed when the process ' + 'exits. Can be specified more than once.')) + + parser.add_option( + '--env', + type='string', + action='append', + dest='envs', + default=[], + help=('Specify environment variables to pass to the launched ' + 'process.')) + + parser.add_option( + '--waitFor', + action='store_true', + dest='waitFor', + default=False, + help=('Wait for the next process to be launched whose name matches ' + 'the basename of the program specified with the --program ' + 'option')) + + (options, args) = parser.parse_args(sys.argv[1:]) + + if options.vscode_path is None and options.port is None: + print('error: must either specify a path to a Visual Studio Code ' + 'Debug Adaptor vscode executable path using the --vscode ' + 'option, or a port to attach to for an existing lldb-vscode ' + 'using the --port option') + return + dbg = DebugAdaptor(executable=options.vscode_path, port=options.port) + if options.debug: + raw_input('Waiting for debugger to attach pid "%i"' % ( + dbg.get_pid())) + if options.replay: + dbg.replay_packets(options.replay) + else: + run_vscode(dbg, args, options) + dbg.terminate() + + +if __name__ == '__main__': + main()