Index: test/dotest.py =================================================================== --- test/dotest.py +++ test/dotest.py @@ -136,7 +136,7 @@ # Something for Windows here? dont_do_debugserver_test = "linux" in sys.platform or "freebsd" in sys.platform -# Don't do lldb-gdbserver (llgs) tests on anything except Linux. +# Don't do lldb-server (llgs) tests on anything except Linux. dont_do_llgs_test = not ("linux" in sys.platform) # The blacklist is optional (-b blacklistFile) and allows a central place to skip Index: test/lldbtest.py =================================================================== --- test/lldbtest.py +++ test/lldbtest.py @@ -494,7 +494,7 @@ return wrapper def llgs_test(func): - """Decorate the item as a lldb-gdbserver test.""" + """Decorate the item as a lldb-server test.""" if isinstance(func, type) and issubclass(func, unittest2.TestCase): raise Exception("@llgs_test can only be used to decorate a test method") @wraps(func) Index: test/tools/lldb-gdbserver/Makefile =================================================================== --- test/tools/lldb-gdbserver/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -LEVEL = ../../make - -CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS -ENABLE_THREADS := YES -CXX_SOURCES := main.cpp -MAKE_DSYM :=NO - -include $(LEVEL)/Makefile.rules Index: test/tools/lldb-gdbserver/TestGdbRemoteAttach.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteAttach.py +++ /dev/null @@ -1,122 +0,0 @@ -import gdbremote_testcase -import lldbgdbserverutils -import unittest2 - -from lldbtest import * - -class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase): - - def attach_with_vAttach(self): - # Start the inferior, start the debug monitor, nothing is attached yet. - procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:60"]) - self.assertIsNotNone(procs) - - # Make sure the target process has been launched. 
- inferior = procs.get("inferior") - self.assertIsNotNone(inferior) - self.assertTrue(inferior.pid > 0) - self.assertTrue(lldbgdbserverutils.process_is_running(inferior.pid, True)) - - # Add attach packets. - self.test_sequence.add_log_lines([ - # Do the attach. - "read packet: $vAttach;{:x}#00".format(inferior.pid), - # Expect a stop notification from the attach. - { "direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$", "capture":{1:"stop_signal_hex"} }, - ], True) - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the process id matches what we expected. - pid_text = process_info.get('pid', None) - self.assertIsNotNone(pid_text) - reported_pid = int(pid_text, base=16) - self.assertEqual(reported_pid, inferior.pid) - - @debugserver_test - @dsym_test - def test_attach_with_vAttach_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach_manually() - self.attach_with_vAttach() - - @llgs_test - @dwarf_test - def test_attach_with_vAttach_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach_manually() - self.attach_with_vAttach() - - -if __name__ == '__main__': - unittest2.main() -import gdbremote_testcase -import lldbgdbserverutils -import unittest2 - -from lldbtest import * - -class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase): - - def attach_with_vAttach(self): - # Start the inferior, start the debug monitor, nothing is attached yet. - procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:60"]) - self.assertIsNotNone(procs) - - # Make sure the target process has been launched. 
- inferior = procs.get("inferior") - self.assertIsNotNone(inferior) - self.assertTrue(inferior.pid > 0) - self.assertTrue(lldbgdbserverutils.process_is_running(inferior.pid, True)) - - # Add attach packets. - self.test_sequence.add_log_lines([ - # Do the attach. - "read packet: $vAttach;{:x}#00".format(inferior.pid), - # Expect a stop notification from the attach. - { "direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$", "capture":{1:"stop_signal_hex"} }, - ], True) - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the process id matches what we expected. - pid_text = process_info.get('pid', None) - self.assertIsNotNone(pid_text) - reported_pid = int(pid_text, base=16) - self.assertEqual(reported_pid, inferior.pid) - - @debugserver_test - @dsym_test - def test_attach_with_vAttach_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach_manually() - self.attach_with_vAttach() - - @llgs_test - @dwarf_test - def test_attach_with_vAttach_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach_manually() - self.attach_with_vAttach() - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemoteAuxvSupport.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteAuxvSupport.py +++ /dev/null @@ -1,208 +0,0 @@ -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase): - - AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read" - - def has_auxv_support(self): - inferior_args = ["message:main entered", "sleep:5"] - procs = 
self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) - - # Don't do anything until we match the launched inferior main entry output. - # Then immediately interrupt the process. - # This prevents auxv data being asked for before it's ready and leaves - # us in a stopped state. - self.test_sequence.add_log_lines([ - # Start the inferior... - "read packet: $c#63", - # ... match output.... - { "type":"output_match", "regex":r"^message:main entered\r\n$" }, - ], True) - # ... then interrupt. - self.add_interrupt_packets() - self.add_qSupported_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - features = self.parse_qSupported_response(context) - return self.AUXV_SUPPORT_FEATURE_NAME in features and features[self.AUXV_SUPPORT_FEATURE_NAME] == "+" - - def get_raw_auxv_data(self): - # Start up llgs and inferior, and check for auxv support. - if not self.has_auxv_support(): - self.skipTest("auxv data not supported") - - # Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target. - # Auxv is specified in terms of pairs of unsigned longs. - self.reset_test_sequence() - self.add_process_info_collection_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - proc_info = self.parse_process_info_response(context) - self.assertIsNotNone(proc_info) - self.assertTrue("ptrsize" in proc_info) - word_size = int(proc_info["ptrsize"]) - - OFFSET = 0 - LENGTH = 0x400 - - # Grab the auxv data. - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - "read packet: $qXfer:auxv:read::{:x},{:x}:#00".format(OFFSET, LENGTH), - {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} } - ], True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Ensure we end up with all auxv data in one packet. 
- # FIXME don't assume it all comes back in one packet. - self.assertEquals(context.get("response_type"), "l") - - # Decode binary data. - content_raw = context.get("content_raw") - self.assertIsNotNone(content_raw) - return (word_size, self.decode_gdbremote_binary(content_raw)) - - def supports_auxv(self): - # When non-auxv platforms support llgs, skip the test on platforms - # that don't support auxv. - self.assertTrue(self.has_auxv_support()) - - # - # We skip the "supports_auxv" test on debugserver. The rest of the tests - # appropriately skip the auxv tests if the support flag is not present - # in the qSupported response, so the debugserver test bits are still there - # in case debugserver code one day does have auxv support and thus those - # tests don't get skipped. - # - - @llgs_test - @dwarf_test - def test_supports_auxv_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.supports_auxv() - - def auxv_data_is_correct_size(self): - (word_size, auxv_data) = self.get_raw_auxv_data() - self.assertIsNotNone(auxv_data) - - # Ensure auxv data is a multiple of 2*word_size (there should be two unsigned long fields per auxv entry). - self.assertEquals(len(auxv_data) % (2*word_size), 0) - # print "auxv contains {} entries".format(len(auxv_data) / (2*word_size)) - - @debugserver_test - @dsym_test - def test_auxv_data_is_correct_size_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.auxv_data_is_correct_size() - - @llgs_test - @dwarf_test - def test_auxv_data_is_correct_size_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.auxv_data_is_correct_size() - - def auxv_keys_look_valid(self): - (word_size, auxv_data) = self.get_raw_auxv_data() - self.assertIsNotNone(auxv_data) - - # Grab endian. 
- self.reset_test_sequence() - self.add_process_info_collection_packets() - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - endian = process_info.get("endian") - self.assertIsNotNone(endian) - - auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) - self.assertIsNotNone(auxv_dict) - - # Verify keys look reasonable. - for auxv_key in auxv_dict: - self.assertTrue(auxv_key >= 1) - self.assertTrue(auxv_key <= 1000) - # print "auxv dict: {}".format(auxv_dict) - - @debugserver_test - @dsym_test - def test_auxv_keys_look_valid_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.auxv_keys_look_valid() - - @llgs_test - @dwarf_test - def test_auxv_keys_look_valid_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.auxv_keys_look_valid() - - def auxv_chunked_reads_work(self): - # Verify that multiple smaller offset,length reads of auxv data - # return the same data as a single larger read. - - # Grab the auxv data with a single large read here. - (word_size, auxv_data) = self.get_raw_auxv_data() - self.assertIsNotNone(auxv_data) - - # Grab endian. 
- self.reset_test_sequence() - self.add_process_info_collection_packets() - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - endian = process_info.get("endian") - self.assertIsNotNone(endian) - - auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) - self.assertIsNotNone(auxv_dict) - - iterated_auxv_data = self.read_binary_data_in_chunks("qXfer:auxv:read::", 2*word_size) - self.assertIsNotNone(iterated_auxv_data) - - auxv_dict_iterated = self.build_auxv_dict(endian, word_size, iterated_auxv_data) - self.assertIsNotNone(auxv_dict_iterated) - - # Verify both types of data collection returned same content. - self.assertEquals(auxv_dict_iterated, auxv_dict) - - @debugserver_test - @dsym_test - def test_auxv_chunked_reads_work_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.auxv_chunked_reads_work() - - @llgs_test - @dwarf_test - def test_auxv_chunked_reads_work_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.auxv_chunked_reads_work() - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemoteExpeditedRegisters.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteExpeditedRegisters.py +++ /dev/null @@ -1,154 +0,0 @@ -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemoteExpeditedRegisters(gdbremote_testcase.GdbRemoteTestCaseBase): - - def gather_expedited_registers(self): - # Setup the stub and set the gdb remote command stream. - procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) - self.test_sequence.add_log_lines([ - # Start up the inferior. - "read packet: $c#63", - # Immediately tell it to stop. We want to see what it reports. 
- "read packet: {}".format(chr(03)), - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, - ], True) - - # Run the gdb remote command stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Pull out expedited registers. - key_vals_text = context.get("key_vals_text") - self.assertIsNotNone(key_vals_text) - - expedited_registers = self.extract_registers_from_stop_notification(key_vals_text) - self.assertIsNotNone(expedited_registers) - - return expedited_registers - - def stop_notification_contains_generic_register(self, generic_register_name): - # Generate a stop reply, parse out expedited registers from stop notification. - expedited_registers = self.gather_expedited_registers() - self.assertIsNotNone(expedited_registers) - self.assertTrue(len(expedited_registers) > 0) - - # Gather target register infos. - reg_infos = self.gather_register_infos() - - # Find the generic register. - reg_info = self.find_generic_register_with_name(reg_infos, generic_register_name) - self.assertIsNotNone(reg_info) - - # Ensure the expedited registers contained it. - self.assertTrue(reg_info["lldb_register_index"] in expedited_registers) - # print "{} reg_info:{}".format(generic_register_name, reg_info) - - def stop_notification_contains_any_registers(self): - # Generate a stop reply, parse out expedited registers from stop notification. - expedited_registers = self.gather_expedited_registers() - # Verify we have at least one expedited register. 
- self.assertTrue(len(expedited_registers) > 0) - - @debugserver_test - @dsym_test - def test_stop_notification_contains_any_registers_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_notification_contains_any_registers() - - @llgs_test - @dwarf_test - def test_stop_notification_contains_any_registers_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_notification_contains_any_registers() - - def stop_notification_contains_no_duplicate_registers(self): - # Generate a stop reply, parse out expedited registers from stop notification. - expedited_registers = self.gather_expedited_registers() - # Verify no expedited register was specified multiple times. - for (reg_num, value) in expedited_registers.items(): - if (type(value) == list) and (len(value) > 0): - self.fail("expedited register number {} specified more than once ({} times)".format(reg_num, len(value))) - - @debugserver_test - @dsym_test - def test_stop_notification_contains_no_duplicate_registers_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_notification_contains_no_duplicate_registers() - - @llgs_test - @dwarf_test - def test_stop_notification_contains_no_duplicate_registers_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_notification_contains_no_duplicate_registers() - - def stop_notification_contains_pc_register(self): - self.stop_notification_contains_generic_register("pc") - - @debugserver_test - @dsym_test - def test_stop_notification_contains_pc_register_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_notification_contains_pc_register() - - @llgs_test - @dwarf_test - def test_stop_notification_contains_pc_register_llgs_dwarf(self): - self.init_llgs_test() - 
self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_notification_contains_pc_register() - - def stop_notification_contains_fp_register(self): - self.stop_notification_contains_generic_register("fp") - - @debugserver_test - @dsym_test - def test_stop_notification_contains_fp_register_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_notification_contains_fp_register() - - @llgs_test - @dwarf_test - def test_stop_notification_contains_fp_register_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_notification_contains_fp_register() - - def stop_notification_contains_sp_register(self): - self.stop_notification_contains_generic_register("sp") - - @debugserver_test - @dsym_test - def test_stop_notification_contains_sp_register_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_notification_contains_sp_register() - - @llgs_test - @dwarf_test - def test_stop_notification_contains_sp_register_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_notification_contains_sp_register() - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemoteKill.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteKill.py +++ /dev/null @@ -1,49 +0,0 @@ -import unittest2 - -import gdbremote_testcase -import lldbgdbserverutils - -from lldbtest import * - -class TestGdbRemoteKill(gdbremote_testcase.GdbRemoteTestCaseBase): - def attach_commandline_kill_after_initial_stop(self): - procs = self.prep_debug_monitor_and_inferior() - self.test_sequence.add_log_lines([ - "read packet: $k#6b", - {"direction":"send", "regex":r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}" }, - ], True) - - if self.stub_sends_two_stop_notifications_on_kill: - 
# Add an expectation for a second X result for stubs that send two of these. - self.test_sequence.add_log_lines([ - {"direction":"send", "regex":r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}" }, - ], True) - - self.expect_gdbremote_sequence() - - # Wait a moment for completed and now-detached inferior process to clear. - time.sleep(1) - - # Process should be dead now. Reap results. - poll_result = procs["inferior"].poll() - self.assertIsNotNone(poll_result) - - # Where possible, verify at the system level that the process is not running. - self.assertFalse(lldbgdbserverutils.process_is_running(procs["inferior"].pid, False)) - - @debugserver_test - @dsym_test - def test_attach_commandline_kill_after_initial_stop_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.attach_commandline_kill_after_initial_stop() - - @llgs_test - @dwarf_test - def test_attach_commandline_kill_after_initial_stop_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.attach_commandline_kill_after_initial_stop() - Index: test/tools/lldb-gdbserver/TestGdbRemoteProcessInfo.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteProcessInfo.py +++ /dev/null @@ -1,188 +0,0 @@ -import gdbremote_testcase -import lldbgdbserverutils -import sys -import unittest2 - -from lldbtest import * - -class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase): - - def qProcessInfo_returns_running_process(self): - procs = self.prep_debug_monitor_and_inferior() - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the process id looks reasonable. 
- pid_text = process_info.get("pid") - self.assertIsNotNone(pid_text) - pid = int(pid_text, base=16) - self.assertNotEqual(0, pid) - - # If possible, verify that the process is running. - self.assertTrue(lldbgdbserverutils.process_is_running(pid, True)) - - @debugserver_test - @dsym_test - def test_qProcessInfo_returns_running_process_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.qProcessInfo_returns_running_process() - - @llgs_test - @dwarf_test - def test_qProcessInfo_returns_running_process_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.qProcessInfo_returns_running_process() - - def attach_commandline_qProcessInfo_reports_correct_pid(self): - procs = self.prep_debug_monitor_and_inferior() - self.assertIsNotNone(procs) - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the process id matches what we expected. 
- pid_text = process_info.get('pid', None) - self.assertIsNotNone(pid_text) - reported_pid = int(pid_text, base=16) - self.assertEqual(reported_pid, procs["inferior"].pid) - - @debugserver_test - @dsym_test - def test_attach_commandline_qProcessInfo_reports_correct_pid_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.attach_commandline_qProcessInfo_reports_correct_pid() - - @llgs_test - @dwarf_test - def test_attach_commandline_qProcessInfo_reports_correct_pid_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.attach_commandline_qProcessInfo_reports_correct_pid() - - def qProcessInfo_reports_valid_endian(self): - procs = self.prep_debug_monitor_and_inferior() - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the process id looks reasonable. 
- endian = process_info.get("endian") - self.assertIsNotNone(endian) - self.assertTrue(endian in ["little", "big", "pdp"]) - - @debugserver_test - @dsym_test - def test_qProcessInfo_reports_valid_endian_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.qProcessInfo_reports_valid_endian() - - @llgs_test - @dwarf_test - def test_qProcessInfo_reports_valid_endian_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.qProcessInfo_reports_valid_endian() - - def qProcessInfo_contains_keys(self, expected_key_set): - procs = self.prep_debug_monitor_and_inferior() - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the expected keys are present and non-None within the process info. - missing_key_set = set() - for expected_key in expected_key_set: - if expected_key not in process_info: - missing_key_set.add(expected_key) - - self.assertEquals(missing_key_set, set(), "the listed keys are missing in the qProcessInfo result") - - def qProcessInfo_does_not_contain_keys(self, absent_key_set): - procs = self.prep_debug_monitor_and_inferior() - self.add_process_info_collection_packets() - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info response - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - - # Ensure the unexpected keys are not present - unexpected_key_set = set() - for unexpected_key in absent_key_set: - if unexpected_key in process_info: - unexpected_key_set.add(unexpected_key) - - self.assertEquals(unexpected_key_set, set(), "the listed keys were present but unexpected in qProcessInfo result") - - @unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin") - 
@debugserver_test - @dsym_test - def test_qProcessInfo_contains_cputype_cpusubtype_debugserver_darwin(self): - self.init_debugserver_test() - self.buildDsym() - self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype'])) - - @unittest2.skipUnless(sys.platform.startswith("linux"), "requires Linux") - @llgs_test - @dwarf_test - def test_qProcessInfo_contains_triple_llgs_linux(self): - self.init_llgs_test() - self.buildDwarf() - self.qProcessInfo_contains_keys(set(['triple'])) - - @unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin") - @debugserver_test - @dsym_test - def test_qProcessInfo_does_not_contain_triple_debugserver_darwin(self): - self.init_debugserver_test() - self.buildDsym() - # We don't expect to see triple on darwin. If we do, we'll prefer triple - # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup - # for the remote Host and Process. - self.qProcessInfo_does_not_contain_keys(set(['triple'])) - - @unittest2.skipUnless(sys.platform.startswith("linux"), "requires Linux") - @llgs_test - @dwarf_test - def test_qProcessInfo_does_not_contain_cputype_cpusubtype_llgs_linux(self): - self.init_llgs_test() - self.buildDwarf() - self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype'])) - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemoteRegisterState.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteRegisterState.py +++ /dev/null @@ -1,130 +0,0 @@ -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemoteRegisterState(gdbremote_testcase.GdbRemoteTestCaseBase): - """Test QSaveRegisterState/QRestoreRegisterState support.""" - - def grp_register_save_restore_works(self, with_suffix): - # Start up the process, use thread suffix, grab main thread id. 
- inferior_args = ["message:main entered", "sleep:5"] - procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) - - self.add_process_info_collection_packets() - self.add_register_info_collection_packets() - if with_suffix: - self.add_thread_suffix_request_packets() - self.add_threadinfo_collection_packets() - self.test_sequence.add_log_lines([ - # Start the inferior... - "read packet: $c#63", - # ... match output.... - { "type":"output_match", "regex":r"^message:main entered\r\n$" }, - ], True) - # ... then interrupt. - self.add_interrupt_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info. - process_info = self.parse_process_info_response(context) - endian = process_info.get("endian") - self.assertIsNotNone(endian) - - # Gather register info. - reg_infos = self.parse_register_info_packets(context) - self.assertIsNotNone(reg_infos) - self.add_lldb_register_index(reg_infos) - - # Pull out the register infos that we think we can bit flip successfully. - gpr_reg_infos = [reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] - self.assertTrue(len(gpr_reg_infos) > 0) - - # Gather thread info. - if with_suffix: - threads = self.parse_threadinfo_packets(context) - self.assertIsNotNone(threads) - thread_id = threads[0] - self.assertIsNotNone(thread_id) - # print "Running on thread: 0x{:x}".format(thread_id) - else: - thread_id = None - - # Save register state. - self.reset_test_sequence() - self.add_QSaveRegisterState_packets(thread_id) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - (success, state_id) = self.parse_QSaveRegisterState_response(context) - self.assertTrue(success) - self.assertIsNotNone(state_id) - # print "saved register state id: {}".format(state_id) - - # Remember initial register values. 
- initial_reg_values = self.read_register_values(gpr_reg_infos, endian, thread_id=thread_id) - # print "initial_reg_values: {}".format(initial_reg_values) - - # Flip gpr register values. - (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(gpr_reg_infos, endian, thread_id=thread_id) - # print "successful writes: {}, failed writes: {}".format(successful_writes, failed_writes) - self.assertTrue(successful_writes > 0) - - flipped_reg_values = self.read_register_values(gpr_reg_infos, endian, thread_id=thread_id) - # print "flipped_reg_values: {}".format(flipped_reg_values) - - # Restore register values. - self.reset_test_sequence() - self.add_QRestoreRegisterState_packets(state_id, thread_id) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify registers match initial register values. - final_reg_values = self.read_register_values(gpr_reg_infos, endian, thread_id=thread_id) - # print "final_reg_values: {}".format(final_reg_values) - self.assertIsNotNone(final_reg_values) - self.assertEquals(final_reg_values, initial_reg_values) - - @debugserver_test - @dsym_test - def test_grp_register_save_restore_works_with_suffix_debugserver_dsym(self): - USE_THREAD_SUFFIX = True - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.grp_register_save_restore_works(USE_THREAD_SUFFIX) - - @llgs_test - @dwarf_test - def test_grp_register_save_restore_works_with_suffix_llgs_dwarf(self): - USE_THREAD_SUFFIX = True - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.grp_register_save_restore_works(USE_THREAD_SUFFIX) - - @debugserver_test - @dsym_test - def test_grp_register_save_restore_works_no_suffix_debugserver_dsym(self): - USE_THREAD_SUFFIX = False - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.grp_register_save_restore_works(USE_THREAD_SUFFIX) - - @llgs_test - @dwarf_test - def 
test_grp_register_save_restore_works_no_suffix_llgs_dwarf(self): - USE_THREAD_SUFFIX = False - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.grp_register_save_restore_works(USE_THREAD_SUFFIX) - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemoteSingleStep.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteSingleStep.py +++ /dev/null @@ -1,25 +0,0 @@ -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase): - - @debugserver_test - @dsym_test - def test_single_step_only_steps_one_instruction_with_s_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="s") - - @llgs_test - @dwarf_test - def test_single_step_only_steps_one_instruction_with_s_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="s") - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemoteThreadsInStopReply.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemoteThreadsInStopReply.py +++ /dev/null @@ -1,172 +0,0 @@ -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemoteThreadsInStopReply(gdbremote_testcase.GdbRemoteTestCaseBase): - - ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [ - "read packet: $QListThreadsInStopReply#21", - "send packet: $OK#00", - ] - - def gather_stop_reply_threads(self, post_startup_log_lines, thread_count): - # Set up the inferior args. 
- inferior_args=[] - for i in range(thread_count - 1): - inferior_args.append("thread:new") - inferior_args.append("sleep:10") - procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) - - # Assumes test_sequence has anything added needed to setup the initial state. - # (Like optionally enabling QThreadsInStopReply.) - if post_startup_log_lines: - self.test_sequence.add_log_lines(post_startup_log_lines, True) - self.test_sequence.add_log_lines([ - "read packet: $c#63" - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Give threads time to start up, then break. - time.sleep(1) - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - "read packet: {}".format(chr(03)), - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Wait until all threads have started. - threads = self.wait_for_thread_count(thread_count, timeout_seconds=3) - self.assertIsNotNone(threads) - self.assertEquals(len(threads), thread_count) - - # Run, then stop the process, grab the stop reply content. - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - "read packet: $c#63", - "read packet: {}".format(chr(03)), - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Parse the stop reply contents. - key_vals_text = context.get("key_vals_text") - self.assertIsNotNone(key_vals_text) - kv_dict = self.parse_key_val_dict(key_vals_text) - self.assertIsNotNone(kv_dict) - - # Pull out threads from stop response. 
- stop_reply_threads_text = kv_dict.get("threads") - if stop_reply_threads_text: - return [int(thread_id, 16) for thread_id in stop_reply_threads_text.split(",")] - else: - return [] - - def QListThreadsInStopReply_supported(self): - procs = self.prep_debug_monitor_and_inferior() - self.test_sequence.add_log_lines(self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - @debugserver_test - @dsym_test - def test_QListThreadsInStopReply_supported_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.QListThreadsInStopReply_supported() - - @llgs_test - @dwarf_test - def test_QListThreadsInStopReply_supported_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.QListThreadsInStopReply_supported() - - def stop_reply_reports_multiple_threads(self, thread_count): - # Gather threads from stop notification when QThreadsInStopReply is enabled. - stop_reply_threads = self.gather_stop_reply_threads(self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) - self.assertEquals(len(stop_reply_threads), thread_count) - - @debugserver_test - @dsym_test - def test_stop_reply_reports_multiple_threads_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_reply_reports_multiple_threads(5) - - @llgs_test - @dwarf_test - def test_stop_reply_reports_multiple_threads_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_reply_reports_multiple_threads(5) - - def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count): - # Gather threads from stop notification when QThreadsInStopReply is not enabled. 
- stop_reply_threads = self.gather_stop_reply_threads(None, thread_count) - self.assertEquals(len(stop_reply_threads), 0) - - @debugserver_test - @dsym_test - def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.no_QListThreadsInStopReply_supplies_no_threads(5) - - @llgs_test - @dwarf_test - def test_no_QListThreadsInStopReply_supplies_no_threads_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.no_QListThreadsInStopReply_supplies_no_threads(5) - - def stop_reply_reports_correct_threads(self, thread_count): - # Gather threads from stop notification when QThreadsInStopReply is enabled. - stop_reply_threads = self.gather_stop_reply_threads(self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) - self.assertEquals(len(stop_reply_threads), thread_count) - - # Gather threads from q{f,s}ThreadInfo. - self.reset_test_sequence() - self.add_threadinfo_collection_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - threads = self.parse_threadinfo_packets(context) - self.assertIsNotNone(threads) - self.assertEquals(len(threads), thread_count) - - # Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads - for tid in threads: - self.assertTrue(tid in stop_reply_threads) - - @debugserver_test - @dsym_test - def test_stop_reply_reports_correct_threads_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.stop_reply_reports_correct_threads(5) - - @llgs_test - @dwarf_test - def test_stop_reply_reports_correct_threads_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.stop_reply_reports_correct_threads(5) - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemote_qThreadStopInfo.py 
=================================================================== --- test/tools/lldb-gdbserver/TestGdbRemote_qThreadStopInfo.py +++ /dev/null @@ -1,155 +0,0 @@ -import sys -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemote_qThreadStopInfo(gdbremote_testcase.GdbRemoteTestCaseBase): - - THREAD_COUNT = 5 - - def gather_stop_replies_via_qThreadStopInfo(self, thread_count): - # Set up the inferior args. - inferior_args=[] - for i in range(thread_count - 1): - inferior_args.append("thread:new") - inferior_args.append("sleep:10") - procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) - - # Assumes test_sequence has anything added needed to setup the initial state. - # (Like optionally enabling QThreadsInStopReply.) - self.test_sequence.add_log_lines([ - "read packet: $c#63" - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Give threads time to start up, then break. - time.sleep(1) - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - "read packet: {}".format(chr(03)), - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Wait until all threads have started. - threads = self.wait_for_thread_count(thread_count, timeout_seconds=3) - self.assertIsNotNone(threads) - self.assertEquals(len(threads), thread_count) - - # Grab stop reply for each thread via qThreadStopInfo{tid:hex}. - stop_replies = {} - thread_dicts = {} - for thread in threads: - # Run the qThreadStopInfo command. 
- self.reset_test_sequence() - self.test_sequence.add_log_lines([ - "read packet: $qThreadStopInfo{:x}#00".format(thread), - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Parse stop reply contents. - key_vals_text = context.get("key_vals_text") - self.assertIsNotNone(key_vals_text) - kv_dict = self.parse_key_val_dict(key_vals_text) - self.assertIsNotNone(kv_dict) - - # Verify there is a thread and that it matches the expected thread id. - kv_thread = kv_dict.get("thread") - self.assertIsNotNone(kv_thread) - kv_thread_id = int(kv_thread, 16) - self.assertEquals(kv_thread_id, thread) - - # Grab the stop id reported. - stop_result_text = context.get("stop_result") - self.assertIsNotNone(stop_result_text) - stop_replies[kv_thread_id] = int(stop_result_text, 16) - - # Hang on to the key-val dictionary for the thread. - thread_dicts[kv_thread_id] = kv_dict - - return (stop_replies, thread_dicts) - - def qThreadStopInfo_works_for_multiple_threads(self, thread_count): - (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) - self.assertEquals(len(stop_replies), thread_count) - - @debugserver_test - @dsym_test - def test_qThreadStopInfo_works_for_multiple_threads_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) - - @llgs_test - @dwarf_test - def test_qThreadStopInfo_works_for_multiple_threads_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) - - def qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self, thread_count): - (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) - 
self.assertIsNotNone(stop_replies) - - no_stop_reason_count = sum(1 for stop_reason in stop_replies.values() if stop_reason == 0) - with_stop_reason_count = sum(1 for stop_reason in stop_replies.values() if stop_reason != 0) - - # All but one thread should report no stop reason. - self.assertEqual(no_stop_reason_count, thread_count - 1) - - # Only one thread should should indicate a stop reason. - self.assertEqual(with_stop_reason_count, 1) - - @debugserver_test - @dsym_test - def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self.THREAD_COUNT) - - @llgs_test - @dwarf_test - def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self.THREAD_COUNT) - - def qThreadStopInfo_has_valid_thread_names(self, thread_count, expected_thread_name): - (_, thread_dicts) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) - self.assertIsNotNone(thread_dicts) - - for thread_dict in thread_dicts.values(): - name = thread_dict.get("name") - self.assertIsNotNone(name) - self.assertEquals(name, expected_thread_name) - - @unittest2.skip("MacOSX doesn't have a default thread name") - @debugserver_test - @dsym_test - def test_qThreadStopInfo_has_valid_thread_names_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") - - @unittest2.skipUnless(sys.platform.startswith("linux"), "test requires OS with set, equal thread names by default") - @llgs_test - @dwarf_test - def test_qThreadStopInfo_has_valid_thread_names_llgs_dwarf(self): - 
self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestGdbRemote_vCont.py =================================================================== --- test/tools/lldb-gdbserver/TestGdbRemote_vCont.py +++ /dev/null @@ -1,125 +0,0 @@ -import unittest2 - -import gdbremote_testcase -from lldbtest import * - -class TestGdbRemote_vCont(gdbremote_testcase.GdbRemoteTestCaseBase): - - def vCont_supports_mode(self, mode, inferior_args=None): - # Setup the stub and set the gdb remote command stream. - procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) - self.add_vCont_query_packets() - - # Run the gdb remote command stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Pull out supported modes. - supported_vCont_modes = self.parse_vCont_query_response(context) - self.assertIsNotNone(supported_vCont_modes) - - # Verify we support the given mode. 
- self.assertTrue(mode in supported_vCont_modes) - - def vCont_supports_c(self): - self.vCont_supports_mode("c") - - def vCont_supports_C(self): - self.vCont_supports_mode("C") - - def vCont_supports_s(self): - self.vCont_supports_mode("s") - - def vCont_supports_S(self): - self.vCont_supports_mode("S") - - @debugserver_test - @dsym_test - def test_vCont_supports_c_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.vCont_supports_c() - - @llgs_test - def test_vCont_supports_c_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.vCont_supports_c() - - @debugserver_test - @dsym_test - def test_vCont_supports_C_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.vCont_supports_C() - - @llgs_test - @dwarf_test - def test_vCont_supports_C_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.vCont_supports_C() - - @debugserver_test - @dsym_test - def test_vCont_supports_s_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.vCont_supports_s() - - @llgs_test - @dwarf_test - def test_vCont_supports_s_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.vCont_supports_s() - - @debugserver_test - @dsym_test - def test_vCont_supports_S_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.vCont_supports_S() - - @llgs_test - @dwarf_test - def test_vCont_supports_S_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.vCont_supports_S() - - @debugserver_test - @dsym_test - def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="vCont;s") - - @llgs_test - @dwarf_test - def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - 
self.set_inferior_startup_launch() - self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="vCont;s") - - @debugserver_test - @dsym_test - def test_single_step_only_steps_one_instruction_with_vCont_s_thread_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.single_step_only_steps_one_instruction(use_Hc_packet=False, step_instruction="vCont;s:{thread}") - - @llgs_test - @dwarf_test - def test_single_step_only_steps_one_instruction_with_vCont_s_thread_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.single_step_only_steps_one_instruction(use_Hc_packet=False, step_instruction="vCont;s:{thread}") - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/TestLldbGdbServer.py =================================================================== --- test/tools/lldb-gdbserver/TestLldbGdbServer.py +++ /dev/null @@ -1,1501 +0,0 @@ -""" -Test case for testing the gdbremote protocol. - -Tests run against debugserver and lldb-gdbserver (llgs). -lldb-gdbserver tests run where the lldb-gdbserver exe is -available. - -This class will be broken into smaller test case classes by -gdb remote packet functional areas. For now it contains -the initial set of tests implemented. 
-""" - -import gdbremote_testcase -import lldbgdbserverutils -import platform -import signal -import unittest2 -from lldbtest import * - -class LldbGdbServerTestCase(gdbremote_testcase.GdbRemoteTestCaseBase): - - @debugserver_test - def test_exe_starts_debugserver(self): - self.init_debugserver_test() - server = self.connect_to_debug_monitor() - - @llgs_test - def test_exe_starts_llgs(self): - self.init_llgs_test() - server = self.connect_to_debug_monitor() - - def start_no_ack_mode(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - self.add_no_ack_remote_stream() - self.expect_gdbremote_sequence() - - @debugserver_test - def test_start_no_ack_mode_debugserver(self): - self.init_debugserver_test() - self.start_no_ack_mode() - - @llgs_test - def test_start_no_ack_mode_llgs(self): - self.init_llgs_test() - self.start_no_ack_mode() - - def thread_suffix_supported(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - self.add_no_ack_remote_stream() - self.test_sequence.add_log_lines( - ["lldb-gdbserver < 26> read packet: $QThreadSuffixSupported#e4", - "lldb-gdbserver < 6> send packet: $OK#9a"], - True) - - self.expect_gdbremote_sequence() - - @debugserver_test - def test_thread_suffix_supported_debugserver(self): - self.init_debugserver_test() - self.thread_suffix_supported() - - @llgs_test - def test_thread_suffix_supported_llgs(self): - self.init_llgs_test() - self.thread_suffix_supported() - - def list_threads_in_stop_reply_supported(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - self.add_no_ack_remote_stream() - self.test_sequence.add_log_lines( - ["lldb-gdbserver < 27> read packet: $QListThreadsInStopReply#21", - "lldb-gdbserver < 6> send packet: $OK#9a"], - True) - self.expect_gdbremote_sequence() - - @debugserver_test - def test_list_threads_in_stop_reply_supported_debugserver(self): - self.init_debugserver_test() - self.list_threads_in_stop_reply_supported() - 
- @llgs_test - def test_list_threads_in_stop_reply_supported_llgs(self): - self.init_llgs_test() - self.list_threads_in_stop_reply_supported() - - def start_inferior(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # build launch args - launch_args = [os.path.abspath('a.out')] - - self.add_no_ack_remote_stream() - self.test_sequence.add_log_lines( - ["read packet: %s" % lldbgdbserverutils.build_gdbremote_A_packet(launch_args), - "send packet: $OK#9a"], - True) - self.expect_gdbremote_sequence() - - @debugserver_test - @dsym_test - def test_start_inferior_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.start_inferior() - - @llgs_test - @dwarf_test - def test_start_inferior_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.start_inferior() - - def inferior_exit_0(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # build launch args - launch_args = [os.path.abspath('a.out')] - - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.test_sequence.add_log_lines( - ["read packet: $vCont;c#a8", - "send packet: $W00#00"], - True) - - self.expect_gdbremote_sequence() - - @debugserver_test - @dsym_test - def test_inferior_exit_0_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.inferior_exit_0() - - @llgs_test - @dwarf_test - def test_inferior_exit_0_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.inferior_exit_0() - - def inferior_exit_42(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - RETVAL = 42 - - # build launch args - launch_args = [os.path.abspath('a.out'), "retval:%d" % RETVAL] - - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.test_sequence.add_log_lines( - ["read packet: $vCont;c#a8", - "send packet: $W{0:02x}#00".format(RETVAL)], - True) - - self.expect_gdbremote_sequence() - - 
@debugserver_test - @dsym_test - def test_inferior_exit_42_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.inferior_exit_42() - - @llgs_test - @dwarf_test - def test_inferior_exit_42_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.inferior_exit_42() - - def c_packet_works(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # build launch args - launch_args = [os.path.abspath('a.out')] - - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.test_sequence.add_log_lines( - ["read packet: $c#63", - "send packet: $W00#00"], - True) - - self.expect_gdbremote_sequence() - - @debugserver_test - @dsym_test - def test_c_packet_works_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.c_packet_works() - - @llgs_test - @dwarf_test - def test_c_packet_works_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.c_packet_works() - - def inferior_print_exit(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # build launch args - launch_args = [os.path.abspath('a.out'), "hello, world"] - - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.test_sequence.add_log_lines( - ["read packet: $vCont;c#a8", - {"type":"output_match", "regex":r"^hello, world\r\n$" }, - "send packet: $W00#00"], - True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - @debugserver_test - @dsym_test - def test_inferior_print_exit_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.inferior_print_exit() - - @llgs_test - @dwarf_test - def test_inferior_print_exit_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.inferior_print_exit() - - def first_launch_stop_reply_thread_matches_first_qC(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # build launch args - 
launch_args = [os.path.abspath('a.out'), "hello, world"] - - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.test_sequence.add_log_lines( - ["read packet: $qC#00", - { "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} }, - "read packet: $?#00", - { "direction":"send", "regex":r"^\$T[0-9a-fA-F]{2}thread:([0-9a-fA-F]+)", "expect_captures":{1:"thread_id"} }], - True) - self.expect_gdbremote_sequence() - - @debugserver_test - @dsym_test - def test_first_launch_stop_reply_thread_matches_first_qC_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.first_launch_stop_reply_thread_matches_first_qC() - - @llgs_test - @dwarf_test - def test_first_launch_stop_reply_thread_matches_first_qC_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.first_launch_stop_reply_thread_matches_first_qC() - - def attach_commandline_continue_app_exits(self): - procs = self.prep_debug_monitor_and_inferior() - self.test_sequence.add_log_lines( - ["read packet: $vCont;c#a8", - "send packet: $W00#00"], - True) - self.expect_gdbremote_sequence() - - # Wait a moment for completed and now-detached inferior process to clear. - time.sleep(1) - - # Process should be dead now. Reap results. - poll_result = procs["inferior"].poll() - self.assertIsNotNone(poll_result) - - # Where possible, verify at the system level that the process is not running. 
- self.assertFalse(lldbgdbserverutils.process_is_running(procs["inferior"].pid, False)) - - @debugserver_test - @dsym_test - def test_attach_commandline_continue_app_exits_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.attach_commandline_continue_app_exits() - - @llgs_test - @dwarf_test - def test_attach_commandline_continue_app_exits_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.attach_commandline_continue_app_exits() - - def qRegisterInfo_returns_one_valid_result(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # Build launch args - launch_args = [os.path.abspath('a.out')] - - # Build the expected protocol stream - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.test_sequence.add_log_lines( - ["read packet: $qRegisterInfo0#00", - { "direction":"send", "regex":r"^\$(.+);#[0-9A-Fa-f]{2}", "capture":{1:"reginfo_0"} }], - True) - - # Run the stream - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - reg_info_packet = context.get("reginfo_0") - self.assertIsNotNone(reg_info_packet) - self.assert_valid_reg_info(lldbgdbserverutils.parse_reg_info_response(reg_info_packet)) - - @debugserver_test - @dsym_test - def test_qRegisterInfo_returns_one_valid_result_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.qRegisterInfo_returns_one_valid_result() - - @llgs_test - @dwarf_test - def test_qRegisterInfo_returns_one_valid_result_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.qRegisterInfo_returns_one_valid_result() - - def qRegisterInfo_returns_all_valid_results(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # Build launch args. - launch_args = [os.path.abspath('a.out')] - - # Build the expected protocol stream. 
- self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.add_register_info_collection_packets() - - # Run the stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Validate that each register info returned validates. - for reg_info in self.parse_register_info_packets(context): - self.assert_valid_reg_info(reg_info) - - @debugserver_test - @dsym_test - def test_qRegisterInfo_returns_all_valid_results_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.qRegisterInfo_returns_all_valid_results() - - @llgs_test - @dwarf_test - def test_qRegisterInfo_returns_all_valid_results_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.qRegisterInfo_returns_all_valid_results() - - def qRegisterInfo_contains_required_generics(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # Build launch args - launch_args = [os.path.abspath('a.out')] - - # Build the expected protocol stream - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.add_register_info_collection_packets() - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather register info entries. - reg_infos = self.parse_register_info_packets(context) - - # Collect all generic registers found. - generic_regs = { reg_info['generic']:1 for reg_info in reg_infos if 'generic' in reg_info } - - # Ensure we have a program counter register. - self.assertTrue('pc' in generic_regs) - - # Ensure we have a frame pointer register. - self.assertTrue('fp' in generic_regs) - - # Ensure we have a stack pointer register. - self.assertTrue('sp' in generic_regs) - - # Ensure we have a flags register. 
- self.assertTrue('flags' in generic_regs) - - @debugserver_test - @dsym_test - def test_qRegisterInfo_contains_required_generics_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.qRegisterInfo_contains_required_generics() - - @llgs_test - @dwarf_test - def test_qRegisterInfo_contains_required_generics_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.qRegisterInfo_contains_required_generics() - - def qRegisterInfo_contains_at_least_one_register_set(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # Build launch args - launch_args = [os.path.abspath('a.out')] - - # Build the expected protocol stream - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.add_register_info_collection_packets() - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather register info entries. - reg_infos = self.parse_register_info_packets(context) - - # Collect all register sets found. 
- register_sets = { reg_info['set']:1 for reg_info in reg_infos if 'set' in reg_info } - self.assertTrue(len(register_sets) >= 1) - - @debugserver_test - @dsym_test - def test_qRegisterInfo_contains_at_least_one_register_set_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.qRegisterInfo_contains_at_least_one_register_set() - - @llgs_test - @dwarf_test - def test_qRegisterInfo_contains_at_least_one_register_set_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.qRegisterInfo_contains_at_least_one_register_set() - - def qRegisterInfo_contains_avx_registers_on_linux_x86_64(self): - server = self.connect_to_debug_monitor() - self.assertIsNotNone(server) - - # Build launch args - launch_args = [os.path.abspath('a.out')] - - # Build the expected protocol stream - self.add_no_ack_remote_stream() - self.add_verified_launch_packets(launch_args) - self.add_register_info_collection_packets() - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather register info entries. - reg_infos = self.parse_register_info_packets(context) - - # Collect all generics found. - register_sets = { reg_info['set']:1 for reg_info in reg_infos if 'set' in reg_info } - self.assertTrue("Advanced Vector Extensions" in register_sets) - - @llgs_test - @dwarf_test - def test_qRegisterInfo_contains_avx_registers_on_linux_x86_64_llgs_dwarf(self): - # Skip this test if not Linux x86_64. - if platform.system() != "Linux" or platform.processor() != "x86_64": - self.skipTest("linux x86_64 test") - - self.init_llgs_test() - self.buildDwarf() - self.qRegisterInfo_contains_avx_registers_on_linux_x86_64() - - def qThreadInfo_contains_thread(self): - procs = self.prep_debug_monitor_and_inferior() - self.add_threadinfo_collection_packets() - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather threadinfo entries. 
- threads = self.parse_threadinfo_packets(context) - self.assertIsNotNone(threads) - - # We should have exactly one thread. - self.assertEqual(len(threads), 1) - - @debugserver_test - @dsym_test - def test_qThreadInfo_contains_thread_launch_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qThreadInfo_contains_thread() - - @llgs_test - @dwarf_test - def test_qThreadInfo_contains_thread_launch_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qThreadInfo_contains_thread() - - @debugserver_test - @dsym_test - def test_qThreadInfo_contains_thread_attach_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.qThreadInfo_contains_thread() - - @llgs_test - @dwarf_test - def test_qThreadInfo_contains_thread_attach_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.qThreadInfo_contains_thread() - - def qThreadInfo_matches_qC(self): - procs = self.prep_debug_monitor_and_inferior() - - self.add_threadinfo_collection_packets() - self.test_sequence.add_log_lines( - ["read packet: $qC#00", - { "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} } - ], True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather threadinfo entries. - threads = self.parse_threadinfo_packets(context) - self.assertIsNotNone(threads) - - # We should have exactly one thread from threadinfo. - self.assertEqual(len(threads), 1) - - # We should have a valid thread_id from $QC. - QC_thread_id_hex = context.get("thread_id") - self.assertIsNotNone(QC_thread_id_hex) - QC_thread_id = int(QC_thread_id_hex, 16) - - # Those two should be the same. 
- self.assertEquals(threads[0], QC_thread_id) - - @debugserver_test - @dsym_test - def test_qThreadInfo_matches_qC_launch_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qThreadInfo_matches_qC() - - @llgs_test - @dwarf_test - def test_qThreadInfo_matches_qC_launch_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qThreadInfo_matches_qC() - - @debugserver_test - @dsym_test - def test_qThreadInfo_matches_qC_attach_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.qThreadInfo_matches_qC() - - @llgs_test - @dwarf_test - def test_qThreadInfo_matches_qC_attach_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.qThreadInfo_matches_qC() - - def p_returns_correct_data_size_for_each_qRegisterInfo(self): - procs = self.prep_debug_monitor_and_inferior() - self.add_register_info_collection_packets() - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather register info entries. - reg_infos = self.parse_register_info_packets(context) - self.assertIsNotNone(reg_infos) - self.assertTrue(len(reg_infos) > 0) - - # Read value for each register. - reg_index = 0 - for reg_info in reg_infos: - # Skip registers that don't have a register set. For x86, these are - # the DRx registers, which have no LLDB-kind register number and thus - # cannot be read via normal NativeRegisterContext::ReadRegister(reg_info,...) calls. - if not "set" in reg_info: - continue - - # Clear existing packet expectations. 
- self.reset_test_sequence() - - # Run the register query - self.test_sequence.add_log_lines( - ["read packet: $p{0:x}#00".format(reg_index), - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }], - True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify the response length. - p_response = context.get("p_response") - self.assertIsNotNone(p_response) - self.assertEquals(len(p_response), 2 * int(reg_info["bitsize"]) / 8) - - # Increment loop - reg_index += 1 - - @debugserver_test - @dsym_test - def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.p_returns_correct_data_size_for_each_qRegisterInfo() - - @llgs_test - @dwarf_test - def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.p_returns_correct_data_size_for_each_qRegisterInfo() - - @debugserver_test - @dsym_test - def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.p_returns_correct_data_size_for_each_qRegisterInfo() - - @llgs_test - @dwarf_test - def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.p_returns_correct_data_size_for_each_qRegisterInfo() - - def Hg_switches_to_3_threads(self): - # Startup the inferior with three threads (main + 2 new ones). - procs = self.prep_debug_monitor_and_inferior(inferior_args=["thread:new", "thread:new"]) - - # Let the inferior process have a few moments to start up the thread when launched. (The launch scenario has no time to run, so threads won't be there yet.) 
- self.run_process_then_stop(run_seconds=1) - - # Wait at most x seconds for 3 threads to be present. - threads = self.wait_for_thread_count(3, timeout_seconds=5) - self.assertEquals(len(threads), 3) - - # verify we can $H to each thead, and $qC matches the thread we set. - for thread in threads: - # Change to each thread, verify current thread id. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $Hg{0:x}#00".format(thread), # Set current thread. - "send packet: $OK#00", - "read packet: $qC#00", - { "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} }], - True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify the thread id. - self.assertIsNotNone(context.get("thread_id")) - self.assertEquals(int(context.get("thread_id"), 16), thread) - - @debugserver_test - @dsym_test - def test_Hg_switches_to_3_threads_launch_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.Hg_switches_to_3_threads() - - @llgs_test - @dwarf_test - def test_Hg_switches_to_3_threads_launch_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.Hg_switches_to_3_threads() - - @debugserver_test - @dsym_test - def test_Hg_switches_to_3_threads_attach_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_attach() - self.Hg_switches_to_3_threads() - - @llgs_test - @dwarf_test - def test_Hg_switches_to_3_threads_attach_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_attach() - self.Hg_switches_to_3_threads() - - def Hc_then_Csignal_signals_correct_thread(self, segfault_signo): - # NOTE only run this one in inferior-launched mode: we can't grab inferior stdout when running attached, - # and the test requires getting stdout from the exe. 
- - NUM_THREADS = 3 - - # Startup the inferior with three threads (main + NUM_THREADS-1 worker threads). - # inferior_args=["thread:print-ids"] - inferior_args=["thread:segfault"] - for i in range(NUM_THREADS - 1): - # if i > 0: - # Give time between thread creation/segfaulting for the handler to work. - # inferior_args.append("sleep:1") - inferior_args.append("thread:new") - inferior_args.append("sleep:10") - - # Launch/attach. (In our case, this should only ever be launched since we need inferior stdout/stderr). - procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) - self.test_sequence.add_log_lines(["read packet: $c#63"], True) - context = self.expect_gdbremote_sequence() - - # Let the inferior process have a few moments to start up the thread when launched. - # context = self.run_process_then_stop(run_seconds=1) - - # Wait at most x seconds for all threads to be present. - # threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5) - # self.assertEquals(len(threads), NUM_THREADS) - - signaled_tids = {} - print_thread_ids = {} - - # Switch to each thread, deliver a signal, and verify signal delivery - for i in range(NUM_THREADS - 1): - # Run until SIGSEGV comes in. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - [{"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"signo", 2:"thread_id"} } - ], True) - - context = self.expect_gdbremote_sequence(timeout_seconds=10) - self.assertIsNotNone(context) - signo = context.get("signo") - self.assertEqual(int(signo, 16), segfault_signo) - - # Ensure we haven't seen this tid yet. - thread_id = int(context.get("thread_id"), 16) - self.assertFalse(thread_id in signaled_tids) - signaled_tids[thread_id] = 1 - - # Send SIGUSR1 to the thread that signaled the SIGSEGV. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - [ - # Set the continue thread. - "read packet: $Hc{0:x}#00".format(thread_id), # Set current thread. 
- "send packet: $OK#00", - - # Continue sending the signal number to the continue thread. - # The commented out packet is a way to do this same operation without using - # a $Hc (but this test is testing $Hc, so we'll stick with the former). - "read packet: $C{0:x}#00".format(signal.SIGUSR1), - # "read packet: $vCont;C{0:x}:{1:x};c#00".format(signal.SIGUSR1, thread_id), - - # FIXME: Linux does not report the thread stop on the delivered signal (SIGUSR1 here). MacOSX debugserver does. - # But MacOSX debugserver isn't guaranteeing the thread the signal handler runs on, so currently its an XFAIL. - # Need to rectify behavior here. The linux behavior is more intuitive to me since we're essentially swapping out - # an about-to-be-delivered signal (for which we already sent a stop packet) to a different signal. - # {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, - # "read packet: $c#63", - { "type":"output_match", "regex":r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\nthread ([0-9a-fA-F]+): past SIGSEGV\r\n", "capture":{ 1:"print_thread_id", 2:"post_handle_thread_id" } }, - ], - True) - - # Run the sequence. - context = self.expect_gdbremote_sequence(timeout_seconds=10) - self.assertIsNotNone(context) - - # Ensure the stop signal is the signal we delivered. - # stop_signo = context.get("stop_signo") - # self.assertIsNotNone(stop_signo) - # self.assertEquals(int(stop_signo,16), signal.SIGUSR1) - - # Ensure the stop thread is the thread to which we delivered the signal. - # stop_thread_id = context.get("stop_thread_id") - # self.assertIsNotNone(stop_thread_id) - # self.assertEquals(int(stop_thread_id,16), thread_id) - - # Ensure we haven't seen this thread id yet. The inferior's self-obtained thread ids are not guaranteed to match the stub tids (at least on MacOSX). 
- print_thread_id = context.get("print_thread_id") - self.assertIsNotNone(print_thread_id) - print_thread_id = int(print_thread_id, 16) - self.assertFalse(print_thread_id in print_thread_ids) - - # Now remember this print (i.e. inferior-reflected) thread id and ensure we don't hit it again. - print_thread_ids[print_thread_id] = 1 - - # Ensure post signal-handle thread id matches the thread that initially raised the SIGSEGV. - post_handle_thread_id = context.get("post_handle_thread_id") - self.assertIsNotNone(post_handle_thread_id) - post_handle_thread_id = int(post_handle_thread_id, 16) - self.assertEquals(post_handle_thread_id, print_thread_id) - - @debugserver_test - @dsym_test - @unittest2.expectedFailure() - def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - # Darwin debugserver translates some signals like SIGSEGV into some gdb expectations about fixed signal numbers. - self.Hc_then_Csignal_signals_correct_thread(self.TARGET_EXC_BAD_ACCESS) - - @llgs_test - @dwarf_test - def test_Hc_then_Csignal_signals_correct_thread_launch_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.Hc_then_Csignal_signals_correct_thread(signal.SIGSEGV) - - def m_packet_reads_memory(self): - # This is the memory we will write into the inferior and then ensure we can read back with $m. - MEMORY_CONTENTS = "Test contents 0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz" - - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior( - inferior_args=["set-message:%s" % MEMORY_CONTENTS, "get-data-address-hex:g_message", "sleep:5"]) - - # Run the process - self.test_sequence.add_log_lines( - [ - # Start running after initial stop. - "read packet: $c#63", - # Match output line that prints the memory address of the message buffer within the inferior. 
- # Note we require launch-only testing so we can get inferior otuput. - { "type":"output_match", "regex":r"^data address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"message_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Grab the message address. - self.assertIsNotNone(context.get("message_address")) - message_address = int(context.get("message_address"), 16) - - # Grab contents from the inferior. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $m{0:x},{1:x}#00".format(message_address, len(MEMORY_CONTENTS)), - {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"read_contents"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Ensure what we read from inferior memory is what we wrote. - self.assertIsNotNone(context.get("read_contents")) - read_contents = context.get("read_contents").decode("hex") - self.assertEquals(read_contents, MEMORY_CONTENTS) - - @debugserver_test - @dsym_test - def test_m_packet_reads_memory_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.m_packet_reads_memory() - - @llgs_test - @dwarf_test - def test_m_packet_reads_memory_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.m_packet_reads_memory() - - def qMemoryRegionInfo_is_supported(self): - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior() - - # Ask if it supports $qMemoryRegionInfo. 
- self.test_sequence.add_log_lines( - ["read packet: $qMemoryRegionInfo#00", - "send packet: $OK#00" - ], True) - self.expect_gdbremote_sequence() - - @debugserver_test - @dsym_test - def test_qMemoryRegionInfo_is_supported_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_is_supported() - - @llgs_test - @dwarf_test - def test_qMemoryRegionInfo_is_supported_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_is_supported() - - def qMemoryRegionInfo_reports_code_address_as_executable(self): - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior( - inferior_args=["get-code-address-hex:hello", "sleep:5"]) - - # Run the process - self.test_sequence.add_log_lines( - [ - # Start running after initial stop. - "read packet: $c#63", - # Match output line that prints the memory address of the message buffer within the inferior. - # Note we require launch-only testing so we can get inferior otuput. - { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"code_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Grab the code address. - self.assertIsNotNone(context.get("code_address")) - code_address = int(context.get("code_address"), 16) - - # Grab memory region info from the inferior. - self.reset_test_sequence() - self.add_query_memory_region_packets(code_address) - - # Run the packet stream. 
- context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - mem_region_dict = self.parse_memory_region_packet(context) - - # Ensure there are no errors reported. - self.assertFalse("error" in mem_region_dict) - - # Ensure code address is readable and executable. - self.assertTrue("permissions" in mem_region_dict) - self.assertTrue("r" in mem_region_dict["permissions"]) - self.assertTrue("x" in mem_region_dict["permissions"]) - - # Ensure the start address and size encompass the address we queried. - self.assert_address_within_memory_region(code_address, mem_region_dict) - - @debugserver_test - @dsym_test - def test_qMemoryRegionInfo_reports_code_address_as_executable_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_reports_code_address_as_executable() - - @llgs_test - @dwarf_test - def test_qMemoryRegionInfo_reports_code_address_as_executable_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_reports_code_address_as_executable() - - def qMemoryRegionInfo_reports_stack_address_as_readable_writeable(self): - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior( - inferior_args=["get-stack-address-hex:", "sleep:5"]) - - # Run the process - self.test_sequence.add_log_lines( - [ - # Start running after initial stop. - "read packet: $c#63", - # Match output line that prints the memory address of the message buffer within the inferior. - # Note we require launch-only testing so we can get inferior otuput. - { "type":"output_match", "regex":r"^stack address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"stack_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. 
- {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Grab the address. - self.assertIsNotNone(context.get("stack_address")) - stack_address = int(context.get("stack_address"), 16) - - # Grab memory region info from the inferior. - self.reset_test_sequence() - self.add_query_memory_region_packets(stack_address) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - mem_region_dict = self.parse_memory_region_packet(context) - - # Ensure there are no errors reported. - self.assertFalse("error" in mem_region_dict) - - # Ensure address is readable and executable. - self.assertTrue("permissions" in mem_region_dict) - self.assertTrue("r" in mem_region_dict["permissions"]) - self.assertTrue("w" in mem_region_dict["permissions"]) - - # Ensure the start address and size encompass the address we queried. - self.assert_address_within_memory_region(stack_address, mem_region_dict) - - @debugserver_test - @dsym_test - def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable() - - @llgs_test - @dwarf_test - def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable() - - def qMemoryRegionInfo_reports_heap_address_as_readable_writeable(self): - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior( - inferior_args=["get-heap-address-hex:", "sleep:5"]) - - # Run the process - self.test_sequence.add_log_lines( - [ - # Start running after initial stop. 
- "read packet: $c#63", - # Match output line that prints the memory address of the message buffer within the inferior. - # Note we require launch-only testing so we can get inferior otuput. - { "type":"output_match", "regex":r"^heap address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"heap_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Grab the address. - self.assertIsNotNone(context.get("heap_address")) - heap_address = int(context.get("heap_address"), 16) - - # Grab memory region info from the inferior. - self.reset_test_sequence() - self.add_query_memory_region_packets(heap_address) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - mem_region_dict = self.parse_memory_region_packet(context) - - # Ensure there are no errors reported. - self.assertFalse("error" in mem_region_dict) - - # Ensure address is readable and executable. - self.assertTrue("permissions" in mem_region_dict) - self.assertTrue("r" in mem_region_dict["permissions"]) - self.assertTrue("w" in mem_region_dict["permissions"]) - - # Ensure the start address and size encompass the address we queried. 
- self.assert_address_within_memory_region(heap_address, mem_region_dict) - - - @debugserver_test - @dsym_test - def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() - - @llgs_test - @dwarf_test - def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() - - def software_breakpoint_set_and_remove_work(self): - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior( - inferior_args=["get-code-address-hex:hello", "sleep:1", "call-function:hello"]) - - # Run the process - self.add_register_info_collection_packets() - self.add_process_info_collection_packets() - self.test_sequence.add_log_lines( - [# Start running after initial stop. - "read packet: $c#63", - # Match output line that prints the memory address of the function call entry point. - # Note we require launch-only testing so we can get inferior otuput. - { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"function_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Gather process info - we need endian of target to handle register value conversions. - process_info = self.parse_process_info_response(context) - endian = process_info.get("endian") - self.assertIsNotNone(endian) - - # Gather register info entries. 
- reg_infos = self.parse_register_info_packets(context) - (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos) - self.assertIsNotNone(pc_lldb_reg_index) - self.assertIsNotNone(pc_reg_info) - - # Grab the function address. - self.assertIsNotNone(context.get("function_address")) - function_address = int(context.get("function_address"), 16) - - # Set the breakpoint. - # Note this might need to be switched per platform (ARM, mips, etc.). - BREAKPOINT_KIND = 1 - self.reset_test_sequence() - self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify the stop signal reported was the breakpoint signal number. - stop_signo = context.get("stop_signo") - self.assertIsNotNone(stop_signo) - self.assertEquals(int(stop_signo,16), signal.SIGTRAP) - - # Ensure we did not receive any output. If the breakpoint was not set, we would - # see output (from a launched process with captured stdio) printing a hello, world message. - # That would indicate the breakpoint didn't take. - self.assertEquals(len(context["O_content"]), 0) - - # Verify that the PC for the main thread is where we expect it - right at the breakpoint address. - # This acts as a another validation on the register reading code. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - [ - # Print the PC. This should match the breakpoint address. - "read packet: $p{0:x}#00".format(pc_lldb_reg_index), - # Capture $p results. - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, - ], True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify the PC is where we expect. Note response is in endianness of the inferior. - p_response = context.get("p_response") - self.assertIsNotNone(p_response) - - # Convert from target endian to int. 
- returned_pc = lldbgdbserverutils.unpack_register_hex_unsigned(endian, p_response) - self.assertEquals(returned_pc, function_address) - - # Verify that a breakpoint remove and continue gets us the expected output. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - [ - # Remove the breakpoint. - "read packet: $z0,{0:x},{1}#00".format(function_address, BREAKPOINT_KIND), - # Verify the stub could unset it. - "send packet: $OK#00", - # Continue running. - "read packet: $c#63", - # We should now receive the output from the call. - { "type":"output_match", "regex":r"^hello, world\r\n$" }, - # And wait for program completion. - {"direction":"send", "regex":r"^\$W00(.*)#[0-9a-fA-F]{2}$" }, - ], True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - @debugserver_test - @dsym_test - def test_software_breakpoint_set_and_remove_work_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.software_breakpoint_set_and_remove_work() - - @llgs_test - @dwarf_test - def test_software_breakpoint_set_and_remove_work_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.software_breakpoint_set_and_remove_work() - - def qSupported_returns_known_stub_features(self): - # Start up the stub and start/prep the inferior. - procs = self.prep_debug_monitor_and_inferior() - self.add_qSupported_packets() - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Retrieve the qSupported features. 
- supported_dict = self.parse_qSupported_response(context) - self.assertIsNotNone(supported_dict) - self.assertTrue(len(supported_dict) > 0) - - @debugserver_test - @dsym_test - def test_qSupported_returns_known_stub_features_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.qSupported_returns_known_stub_features() - - @llgs_test - @dwarf_test - def test_qSupported_returns_known_stub_features_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.qSupported_returns_known_stub_features() - - def written_M_content_reads_back_correctly(self): - TEST_MESSAGE = "Hello, memory" - - # Start up the stub and start/prep the inferior. - procs = self.prep_debug_monitor_and_inferior(inferior_args=["set-message:xxxxxxxxxxxxxX", "get-data-address-hex:g_message", "sleep:1", "print-message:"]) - self.test_sequence.add_log_lines( - [ - # Start running after initial stop. - "read packet: $c#63", - # Match output line that prints the memory address of the message buffer within the inferior. - # Note we require launch-only testing so we can get inferior otuput. - { "type":"output_match", "regex":r"^data address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"message_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Grab the message address. - self.assertIsNotNone(context.get("message_address")) - message_address = int(context.get("message_address"), 16) - - # Hex-encode the test message, adding null termination. - hex_encoded_message = TEST_MESSAGE.encode("hex") - - # Write the message to the inferior. 
- self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $M{0:x},{1:x}:{2}#00".format(message_address, len(hex_encoded_message)/2, hex_encoded_message), - "send packet: $OK#00", - "read packet: $c#63", - { "type":"output_match", "regex":r"^message: (.+)\r\n$", "capture":{ 1:"printed_message"} }, - "send packet: $W00#00", - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Ensure what we read from inferior memory is what we wrote. - printed_message = context.get("printed_message") - self.assertIsNotNone(printed_message) - self.assertEquals(printed_message, TEST_MESSAGE + "X") - - @debugserver_test - @dsym_test - def test_written_M_content_reads_back_correctly_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.written_M_content_reads_back_correctly() - - @llgs_test - @dwarf_test - def test_written_M_content_reads_back_correctly_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.written_M_content_reads_back_correctly() - - def P_writes_all_gpr_registers(self): - # Start inferior debug session, grab all register info. - procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) - self.add_register_info_collection_packets() - self.add_process_info_collection_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Process register infos. - reg_infos = self.parse_register_info_packets(context) - self.assertIsNotNone(reg_infos) - self.add_lldb_register_index(reg_infos) - - # Process endian. - process_info = self.parse_process_info_response(context) - endian = process_info.get("endian") - self.assertIsNotNone(endian) - - # Pull out the register infos that we think we can bit flip successfully,. 
- gpr_reg_infos = [reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] - self.assertTrue(len(gpr_reg_infos) > 0) - - # Write flipped bit pattern of existing value to each register. - (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(gpr_reg_infos, endian) - # print "successful writes: {}, failed writes: {}".format(successful_writes, failed_writes) - self.assertTrue(successful_writes > 0) - - # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp). - # Come back to this. I have the test rigged to verify that at least some of the bit-flip writes work. - @debugserver_test - @dsym_test - def test_P_writes_all_gpr_registers_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.P_writes_all_gpr_registers() - - @llgs_test - @dwarf_test - def test_P_writes_all_gpr_registers_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.P_writes_all_gpr_registers() - - def P_and_p_thread_suffix_work(self): - # Startup the inferior with three threads. 
- procs = self.prep_debug_monitor_and_inferior(inferior_args=["thread:new", "thread:new"]) - self.add_thread_suffix_request_packets() - self.add_register_info_collection_packets() - self.add_process_info_collection_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - process_info = self.parse_process_info_response(context) - self.assertIsNotNone(process_info) - endian = process_info.get("endian") - self.assertIsNotNone(endian) - - reg_infos = self.parse_register_info_packets(context) - self.assertIsNotNone(reg_infos) - self.add_lldb_register_index(reg_infos) - - reg_index = self.select_modifiable_register(reg_infos) - self.assertIsNotNone(reg_index) - reg_byte_size = int(reg_infos[reg_index]["bitsize"]) / 8 - self.assertTrue(reg_byte_size > 0) - - # Run the process a bit so threads can start up, and collect register info. - context = self.run_process_then_stop(run_seconds=1) - self.assertIsNotNone(context) - - # Wait for 3 threads to be present. - threads = self.wait_for_thread_count(3, timeout_seconds=5) - self.assertEquals(len(threads), 3) - - expected_reg_values = [] - register_increment = 1 - next_value = None - - # Set the same register in each of 3 threads to a different value. - # Verify each one has the unique value. - for thread in threads: - # If we don't have a next value yet, start it with the initial read value + 1 - if not next_value: - # Read pre-existing register value. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Set the next value to use for writing as the increment plus current value. 
- p_response = context.get("p_response") - self.assertIsNotNone(p_response) - next_value = lldbgdbserverutils.unpack_register_hex_unsigned(endian, p_response) - - # Set new value using P and thread suffix. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $P{0:x}={1};thread:{2:x}#00".format(reg_index, lldbgdbserverutils.pack_register_hex(endian, next_value, byte_size=reg_byte_size), thread), - "send packet: $OK#00", - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Save the value we set. - expected_reg_values.append(next_value) - - # Increment value for next thread to use (we want them all different so we can verify they wrote to each thread correctly next.) - next_value += register_increment - - # Revisit each thread and verify they have the expected value set for the register we wrote. - thread_index = 0 - for thread in threads: - # Read pre-existing register value. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Get the register value. - p_response = context.get("p_response") - self.assertIsNotNone(p_response) - read_value = lldbgdbserverutils.unpack_register_hex_unsigned(endian, p_response) - - # Make sure we read back what we wrote. - self.assertEquals(read_value, expected_reg_values[thread_index]) - thread_index += 1 - - # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp). 
- @debugserver_test - @dsym_test - def test_P_and_p_thread_suffix_work_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.set_inferior_startup_launch() - self.P_and_p_thread_suffix_work() - - @llgs_test - @dwarf_test - def test_P_and_p_thread_suffix_work_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.set_inferior_startup_launch() - self.P_and_p_thread_suffix_work() - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/commandline/TestStubReverseConnect.py =================================================================== --- test/tools/lldb-gdbserver/commandline/TestStubReverseConnect.py +++ /dev/null @@ -1,86 +0,0 @@ -# Add the directory above ours to the python library path since we -# will import from there. -import os.path -import sys -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) - -import gdbremote_testcase -import re -import select -import socket -import time -from lldbtest import * - -class TestStubReverseConnect(gdbremote_testcase.GdbRemoteTestCaseBase): - _DEFAULT_TIMEOUT = 20 - - def setUp(self): - # Set up the test. - gdbremote_testcase.GdbRemoteTestCaseBase.setUp(self) - - # Create a listener on a local port. - self.listener_socket = self.create_listener_socket() - self.assertIsNotNone(self.listener_socket) - self.listener_port = self.listener_socket.getsockname()[1] - - def create_listener_socket(self, timeout_seconds=_DEFAULT_TIMEOUT): - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - self.assertIsNotNone(sock) - - sock.settimeout(timeout_seconds) - sock.bind(("127.0.0.1",0)) - sock.listen(1) - - def tear_down_listener(): - try: - sock.shutdown(socket.SHUT_RDWR) - except: - # ignore - None - - self.addTearDownHook(tear_down_listener) - return sock - - def reverse_connect_works(self): - # Indicate stub startup should do a reverse connect. 
- appended_stub_args = " --reverse-connect" - if self.debug_monitor_extra_args: - self.debug_monitor_extra_args += appended_stub_args - else: - self.debug_monitor_extra_args = appended_stub_args - - self.stub_hostname = "127.0.0.1" - self.port = self.listener_port - - # Start the stub. - server = self.launch_debug_monitor(logfile=sys.stdout) - self.assertIsNotNone(server) - self.assertTrue(server.isalive()) - - # Listen for the stub's connection to us. - (stub_socket, address) = self.listener_socket.accept() - self.assertIsNotNone(stub_socket) - self.assertIsNotNone(address) - print "connected to stub {} on {}".format(address, stub_socket.getsockname()) - - # Verify we can do the handshake. If that works, we'll call it good. - self.do_handshake(stub_socket, timeout_seconds=self._DEFAULT_TIMEOUT) - - # Clean up. - stub_socket.shutdown(socket.SHUT_RDWR) - - @debugserver_test - def test_reverse_connect_works_debugserver(self): - self.init_debugserver_test(use_named_pipe=False) - self.set_inferior_startup_launch() - self.reverse_connect_works() - - @llgs_test - def test_reverse_connect_works_llgs(self): - self.init_llgs_test(use_named_pipe=False) - self.set_inferior_startup_launch() - self.reverse_connect_works() - - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-gdbserver/commandline/TestStubSetSID.py =================================================================== --- test/tools/lldb-gdbserver/commandline/TestStubSetSID.py +++ /dev/null @@ -1,85 +0,0 @@ -import unittest2 - -# Add the directory above ours to the python library path since we -# will import from there. 
-import os.path -import sys -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) - -import gdbremote_testcase -import os -import select -import tempfile -import time -from lldbtest import * - - -def get_common_stub_args(): - return [] if 'darwin' in sys.platform else ['g'] - - -class TestStubSetSIDTestCase(gdbremote_testcase.GdbRemoteTestCaseBase): - def get_stub_sid(self, extra_stub_args=None): - # Launch debugserver - if extra_stub_args: - self.debug_monitor_extra_args = extra_stub_args - else: - self.debug_monitor_extra_args = "" - - server = self.launch_debug_monitor() - self.assertIsNotNone(server) - self.assertTrue(server.isalive()) - server.expect("(debugserver|lldb-gdbserver)", timeout=10) - - # Get the process id for the stub. - return os.getsid(server.pid) - - def sid_is_same_without_setsid(self): - stub_sid = self.get_stub_sid() - self.assertEquals(stub_sid, os.getsid(0)) - - def sid_is_different_with_setsid(self): - stub_sid = self.get_stub_sid(" %s --setsid" % ' '.join(get_common_stub_args())) - self.assertNotEquals(stub_sid, os.getsid(0)) - - def sid_is_different_with_S(self): - stub_sid = self.get_stub_sid(" %s -S" % ' '.join(get_common_stub_args())) - self.assertNotEquals(stub_sid, os.getsid(0)) - - @debugserver_test - @unittest2.expectedFailure() # This is the whole purpose of this feature, I would expect it to be the same without --setsid. Investigate. - def test_sid_is_same_without_setsid_debugserver(self): - self.init_debugserver_test() - self.set_inferior_startup_launch() - self.sid_is_same_without_setsid() - - @llgs_test - @unittest2.expectedFailure() # This is the whole purpose of this feature, I would expect it to be the same without --setsid. Investigate. 
- def test_sid_is_same_without_setsid_llgs(self): - self.init_llgs_test() - self.set_inferior_startup_launch() - self.sid_is_same_without_setsid() - - @debugserver_test - def test_sid_is_different_with_setsid_debugserver(self): - self.init_debugserver_test() - self.set_inferior_startup_launch() - self.sid_is_different_with_setsid() - - @llgs_test - def test_sid_is_different_with_setsid_llgs(self): - self.init_llgs_test() - self.set_inferior_startup_launch() - self.sid_is_different_with_setsid() - - @debugserver_test - def test_sid_is_different_with_S_debugserver(self): - self.init_debugserver_test() - self.set_inferior_startup_launch() - self.sid_is_different_with_S() - - @llgs_test - def test_sid_is_different_with_S_llgs(self): - self.init_llgs_test() - self.set_inferior_startup_launch() - self.sid_is_different_with_S() Index: test/tools/lldb-gdbserver/gdbremote_testcase.py =================================================================== --- test/tools/lldb-gdbserver/gdbremote_testcase.py +++ /dev/null @@ -1,1208 +0,0 @@ -""" -Base class for gdb-remote test cases. -""" - -import errno -import os -import os.path -import platform -import random -import re -import select -import sets -import signal -import socket -import subprocess -import sys -import tempfile -import time -import unittest2 -from lldbtest import * -from lldbgdbserverutils import * -import logging - -class GdbRemoteTestCaseBase(TestBase): - - mydir = TestBase.compute_mydir(__file__) - - _TIMEOUT_SECONDS = 5 - - _GDBREMOTE_KILL_PACKET = "$k#6b" - - _LOGGING_LEVEL = logging.WARNING - # _LOGGING_LEVEL = logging.DEBUG - - # Start the inferior separately, attach to the inferior on the stub command line. - _STARTUP_ATTACH = "attach" - # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid). 
- _STARTUP_ATTACH_MANUALLY = "attach_manually" - # Start the stub, and launch the inferior with an $A packet via the initial packet stream. - _STARTUP_LAUNCH = "launch" - - # GDB Signal numbers that are not target-specific used for common exceptions - TARGET_EXC_BAD_ACCESS = 0x91 - TARGET_EXC_BAD_INSTRUCTION = 0x92 - TARGET_EXC_ARITHMETIC = 0x93 - TARGET_EXC_EMULATION = 0x94 - TARGET_EXC_SOFTWARE = 0x95 - TARGET_EXC_BREAKPOINT = 0x96 - - def setUp(self): - TestBase.setUp(self) - FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s' - logging.basicConfig(format=FORMAT) - self.logger = logging.getLogger(__name__) - self.logger.setLevel(self._LOGGING_LEVEL) - self.test_sequence = GdbRemoteTestSequence(self.logger) - self.set_inferior_startup_launch() - self.port = self.get_next_port() - self.named_pipe_path = None - self.named_pipe = None - self.named_pipe_fd = None - self.stub_sends_two_stop_notifications_on_kill = False - self.stub_hostname = "localhost" - - def get_next_port(self): - return 12000 + random.randint(0,3999) - - def reset_test_sequence(self): - self.test_sequence = GdbRemoteTestSequence(self.logger) - - def create_named_pipe(self): - # Create a temp dir and name for a pipe. - temp_dir = tempfile.mkdtemp() - named_pipe_path = os.path.join(temp_dir, "stub_port_number") - - # Create the named pipe. - os.mkfifo(named_pipe_path) - - # Open the read side of the pipe in non-blocking mode. This will return right away, ready or not. - named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK) - - # Create the file for the named pipe. Note this will follow semantics of - # a non-blocking read side of a named pipe, which has different semantics - # than a named pipe opened for read in non-blocking mode. - named_pipe = os.fdopen(named_pipe_fd, "r") - self.assertIsNotNone(named_pipe) - - def shutdown_named_pipe(): - # Close the pipe. - try: - named_pipe.close() - except: - print "failed to close named pipe" - None - - # Delete the pipe. 
- try: - os.remove(named_pipe_path) - except: - print "failed to delete named pipe: {}".format(named_pipe_path) - None - - # Delete the temp directory. - try: - os.rmdir(temp_dir) - except: - print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir)) - None - - # Add the shutdown hook to clean up the named pipe. - self.addTearDownHook(shutdown_named_pipe) - - # Clear the port so the stub selects a port number. - self.port = 0 - - return (named_pipe_path, named_pipe, named_pipe_fd) - - def get_stub_port_from_named_socket(self, read_timeout_seconds=5): - # Wait for something to read with a max timeout. - (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds) - self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.") - self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.") - - # Read the port from the named pipe. - stub_port_raw = self.named_pipe.read() - self.assertIsNotNone(stub_port_raw) - self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe") - - # Trim null byte, convert to int. 
- stub_port_raw = stub_port_raw[:-1] - stub_port = int(stub_port_raw) - self.assertTrue(stub_port > 0) - - return stub_port - - def init_llgs_test(self, use_named_pipe=True): - self.debug_monitor_exe = get_lldb_gdbserver_exe() - if not self.debug_monitor_exe: - self.skipTest("lldb_gdbserver exe not found") - dname = os.path.join(os.environ["LLDB_TEST"], - os.environ["LLDB_SESSION_DIRNAME"]) - self.debug_monitor_extra_args = " gdbserver -c 'log enable -T -f {}/process-{}.log lldb break process thread' -c 'log enable -T -f {}/packets-{}.log gdb-remote packets'".format(dname, self.id(), dname, self.id()) - if use_named_pipe: - (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe() - - def init_debugserver_test(self, use_named_pipe=True): - self.debug_monitor_exe = get_debugserver_exe() - if not self.debug_monitor_exe: - self.skipTest("debugserver exe not found") - self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName) - if use_named_pipe: - (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe() - # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification - # when the process truly dies. 
- self.stub_sends_two_stop_notifications_on_kill = True - - def create_socket(self): - sock = socket.socket() - logger = self.logger - - def shutdown_socket(): - if sock: - try: - # send the kill packet so lldb-gdbserver shuts down gracefully - sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET) - except: - logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0])) - - try: - sock.close() - except: - logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0])) - - self.addTearDownHook(shutdown_socket) - - connect_info = (self.stub_hostname, self.port) - # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1]) - sock.connect(connect_info) - - return sock - - def set_inferior_startup_launch(self): - self._inferior_startup = self._STARTUP_LAUNCH - - def set_inferior_startup_attach(self): - self._inferior_startup = self._STARTUP_ATTACH - - def set_inferior_startup_attach_manually(self): - self._inferior_startup = self._STARTUP_ATTACH_MANUALLY - - def get_debug_monitor_command_line(self, attach_pid=None): - commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port) - if attach_pid: - commandline += " --attach=%d" % attach_pid - if self.named_pipe_path: - commandline += " --named-pipe %s" % self.named_pipe_path - return commandline - - def launch_debug_monitor(self, attach_pid=None, logfile=None): - # Create the command line. - import pexpect - commandline = self.get_debug_monitor_command_line(attach_pid=attach_pid) - - # Start the server. - server = pexpect.spawn(commandline, logfile=logfile) - self.assertIsNotNone(server) - server.expect(r"(debugserver|lldb-gdbserver)", timeout=10) - - # If we're receiving the stub's listening port from the named pipe, do that here. 
- if self.named_pipe: - self.port = self.get_stub_port_from_named_socket() - # print "debug server listening on {}".format(self.port) - - # Turn on logging for what the child sends back. - if self.TraceOn(): - server.logfile_read = sys.stdout - - return server - - def connect_to_debug_monitor(self, attach_pid=None): - if self.named_pipe: - # Create the stub. - server = self.launch_debug_monitor(attach_pid=attach_pid) - self.assertIsNotNone(server) - - def shutdown_debug_monitor(): - try: - server.close() - except: - logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0])) - self.addTearDownHook(shutdown_debug_monitor) - - # Schedule debug monitor to be shut down during teardown. - logger = self.logger - - # Attach to the stub and return a socket opened to it. - self.sock = self.create_socket() - return server - - # We're using a random port algorithm to try not to collide with other ports, - # and retry a max # times. - attempts = 0 - MAX_ATTEMPTS = 20 - - while attempts < MAX_ATTEMPTS: - server = self.launch_debug_monitor(attach_pid=attach_pid) - - # Wait until we receive the server ready message before continuing. - port_good = True - try: - server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port)) - except: - port_good = False - server.close() - - if port_good: - # Schedule debug monitor to be shut down during teardown. - logger = self.logger - def shutdown_debug_monitor(): - try: - server.close() - except: - logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0])) - self.addTearDownHook(shutdown_debug_monitor) - - # Create a socket to talk to the server - try: - self.sock = self.create_socket() - return server - except socket.error as serr: - # We're only trying to handle connection refused. - if serr.errno != errno.ECONNREFUSED: - raise serr - # We should close the server here to be safe. 
- server.close() - - # Increment attempts. - print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS)) - attempts += 1 - - # And wait a random length of time before next attempt, to avoid collisions. - time.sleep(random.randint(1,5)) - - # Now grab a new port number. - self.port = self.get_next_port() - - raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts) - - def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3, exe_path=None): - # We're going to start a child process that the debug monitor stub can later attach to. - # This process needs to be started so that it just hangs around for a while. We'll - # have it sleep. - if not exe_path: - exe_path = os.path.abspath("a.out") - - args = [exe_path] - if inferior_args: - args.extend(inferior_args) - if sleep_seconds: - args.append("sleep:%d" % sleep_seconds) - - return subprocess.Popen(args) - - def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None): - """Prep the debug monitor, the inferior, and the expected packet stream. - - Handle the separate cases of using the debug monitor in attach-to-inferior mode - and in launch-inferior mode. - - For attach-to-inferior mode, the inferior process is first started, then - the debug monitor is started in attach to pid mode (using --attach on the - stub command line), and the no-ack-mode setup is appended to the packet - stream. The packet stream is not yet executed, ready to have more expected - packet entries added to it. - - For launch-inferior mode, the stub is first started, then no ack mode is - setup on the expected packet stream, then the verified launch packets are added - to the expected socket stream. The packet stream is not yet executed, ready - to have more expected packet entries added to it. 
- - The return value is: - {inferior:, server:} - """ - inferior = None - attach_pid = None - - if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY: - # Launch the process that we'll use as the inferior. - inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path) - self.assertIsNotNone(inferior) - self.assertTrue(inferior.pid > 0) - if self._inferior_startup == self._STARTUP_ATTACH: - # In this case, we want the stub to attach via the command line, so set the command line attach pid here. - attach_pid = inferior.pid - - # Launch the debug monitor stub, attaching to the inferior. - server = self.connect_to_debug_monitor(attach_pid=attach_pid) - self.assertIsNotNone(server) - - if self._inferior_startup == self._STARTUP_LAUNCH: - # Build launch args - if not inferior_exe_path: - inferior_exe_path = os.path.abspath("a.out") - launch_args = [inferior_exe_path] - if inferior_args: - launch_args.extend(inferior_args) - - # Build the expected protocol stream - self.add_no_ack_remote_stream() - if self._inferior_startup == self._STARTUP_LAUNCH: - self.add_verified_launch_packets(launch_args) - - return {"inferior":inferior, "server":server} - - def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds): - response = "" - timeout_time = time.time() + timeout_seconds - - while not expected_content_regex.match(response) and time.time() < timeout_time: - can_read, _, _ = select.select([sock], [], [], timeout_seconds) - if can_read and sock in can_read: - recv_bytes = sock.recv(4096) - if recv_bytes: - response += recv_bytes - - self.assertTrue(expected_content_regex.match(response)) - - def expect_socket_send(self, sock, content, timeout_seconds): - request_bytes_remaining = content - timeout_time = time.time() + timeout_seconds - - while len(request_bytes_remaining) > 0 and time.time() < timeout_time: - _, can_write, _ = 
select.select([], [sock], [], timeout_seconds) - if can_write and sock in can_write: - written_byte_count = sock.send(request_bytes_remaining) - request_bytes_remaining = request_bytes_remaining[written_byte_count:] - self.assertEquals(len(request_bytes_remaining), 0) - - def do_handshake(self, stub_socket, timeout_seconds=5): - # Write the ack. - self.expect_socket_send(stub_socket, "+", timeout_seconds) - - # Send the start no ack mode packet. - NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0" - bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST) - self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST)) - - # Receive the ack and "OK" - self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds) - - # Send the final ack. - self.expect_socket_send(stub_socket, "+", timeout_seconds) - - def add_no_ack_remote_stream(self): - self.test_sequence.add_log_lines( - ["read packet: +", - "read packet: $QStartNoAckMode#b0", - "send packet: +", - "send packet: $OK#9a", - "read packet: +"], - True) - - def add_verified_launch_packets(self, launch_args): - self.test_sequence.add_log_lines( - ["read packet: %s" % build_gdbremote_A_packet(launch_args), - "send packet: $OK#00", - "read packet: $qLaunchSuccess#a5", - "send packet: $OK#00"], - True) - - def add_thread_suffix_request_packets(self): - self.test_sequence.add_log_lines( - ["read packet: $QThreadSuffixSupported#e4", - "send packet: $OK#00", - ], True) - - def add_process_info_collection_packets(self): - self.test_sequence.add_log_lines( - ["read packet: $qProcessInfo#dc", - { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }], - True) - - _KNOWN_PROCESS_INFO_KEYS = [ - "pid", - "parent-pid", - "real-uid", - "real-gid", - "effective-uid", - "effective-gid", - "cputype", - "cpusubtype", - "ostype", - "triple", - "vendor", - "endian", - "ptrsize" - ] - - def parse_process_info_response(self, context): - # Ensure we have a process info response. 
- self.assertIsNotNone(context) - process_info_raw = context.get("process_info_raw") - self.assertIsNotNone(process_info_raw) - - # Pull out key:value; pairs. - process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) } - - # Validate keys are known. - for (key, val) in process_info_dict.items(): - self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS) - self.assertIsNotNone(val) - - return process_info_dict - - def add_register_info_collection_packets(self): - self.test_sequence.add_log_lines( - [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True, - "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"), - "save_key":"reg_info_responses" } ], - True) - - def parse_register_info_packets(self, context): - """Return an array of register info dictionaries, one per register info.""" - reg_info_responses = context.get("reg_info_responses") - self.assertIsNotNone(reg_info_responses) - - # Parse register infos. - return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses] - - def expect_gdbremote_sequence(self, timeout_seconds=None): - if not timeout_seconds: - timeout_seconds = self._TIMEOUT_SECONDS - return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger) - - _KNOWN_REGINFO_KEYS = [ - "name", - "alt-name", - "bitsize", - "offset", - "encoding", - "format", - "set", - "gcc", - "dwarf", - "generic", - "container-regs", - "invalidate-regs" - ] - - def assert_valid_reg_info(self, reg_info): - # Assert we know about all the reginfo keys parsed. - for key in reg_info: - self.assertTrue(key in self._KNOWN_REGINFO_KEYS) - - # Check the bare-minimum expected set of register info keys. 
- self.assertTrue("name" in reg_info) - self.assertTrue("bitsize" in reg_info) - self.assertTrue("offset" in reg_info) - self.assertTrue("encoding" in reg_info) - self.assertTrue("format" in reg_info) - - def find_pc_reg_info(self, reg_infos): - lldb_reg_index = 0 - for reg_info in reg_infos: - if ("generic" in reg_info) and (reg_info["generic"] == "pc"): - return (lldb_reg_index, reg_info) - lldb_reg_index += 1 - - return (None, None) - - def add_lldb_register_index(self, reg_infos): - """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry. - - We'll use this when we want to call packets like P/p with a register index but do so - on only a subset of the full register info set. - """ - self.assertIsNotNone(reg_infos) - - reg_index = 0 - for reg_info in reg_infos: - reg_info["lldb_register_index"] = reg_index - reg_index += 1 - - def add_query_memory_region_packets(self, address): - self.test_sequence.add_log_lines( - ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address), - {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }], - True) - - def parse_key_val_dict(self, key_val_text, allow_dupes=True): - self.assertIsNotNone(key_val_text) - kv_dict = {} - for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text): - key = match.group(1) - val = match.group(2) - if key in kv_dict: - if allow_dupes: - if type(kv_dict[key]) == list: - kv_dict[key].append(val) - else: - # Promote to list - kv_dict[key] = [kv_dict[key], val] - else: - self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict)) - else: - kv_dict[key] = val - return kv_dict - - def parse_memory_region_packet(self, context): - # Ensure we have a context. - self.assertIsNotNone(context.get("memory_region_response")) - - # Pull out key:value; pairs. 
- mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response")) - - # Validate keys are known. - for (key, val) in mem_region_dict.items(): - self.assertTrue(key in ["start", "size", "permissions", "error"]) - self.assertIsNotNone(val) - - # Return the dictionary of key-value pairs for the memory region. - return mem_region_dict - - def assert_address_within_memory_region(self, test_address, mem_region_dict): - self.assertIsNotNone(mem_region_dict) - self.assertTrue("start" in mem_region_dict) - self.assertTrue("size" in mem_region_dict) - - range_start = int(mem_region_dict["start"], 16) - range_size = int(mem_region_dict["size"], 16) - range_end = range_start + range_size - - if test_address < range_start: - self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) - elif test_address >= range_end: - self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) - - def add_threadinfo_collection_packets(self): - self.test_sequence.add_log_lines( - [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo", - "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"), - "save_key":"threadinfo_responses" } ], - True) - - def parse_threadinfo_packets(self, context): - """Return an array of thread ids (decimal ints), one per thread.""" - threadinfo_responses = context.get("threadinfo_responses") - self.assertIsNotNone(threadinfo_responses) - - thread_ids = [] - for threadinfo_response in threadinfo_responses: - new_thread_infos = parse_threadinfo_response(threadinfo_response) - thread_ids.extend(new_thread_infos) - return thread_ids - - def wait_for_thread_count(self, thread_count, timeout_seconds=3): - start_time = time.time() - timeout_time = start_time + timeout_seconds - - actual_thread_count = 0 - while actual_thread_count < thread_count: - 
self.reset_test_sequence() - self.add_threadinfo_collection_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - threads = self.parse_threadinfo_packets(context) - self.assertIsNotNone(threads) - - actual_thread_count = len(threads) - - if time.time() > timeout_time: - raise Exception( - 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format( - timeout_seconds, thread_count, actual_thread_count)) - - return threads - - def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1): - self.test_sequence.add_log_lines( - [# Set the breakpoint. - "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind), - # Verify the stub could set it. - "send packet: $OK#00", - ], True) - - if (do_continue): - self.test_sequence.add_log_lines( - [# Continue the inferior. - "read packet: $c#63", - # Expect a breakpoint stop report. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, - ], True) - - def add_remove_breakpoint_packets(self, address, breakpoint_kind=1): - self.test_sequence.add_log_lines( - [# Remove the breakpoint. - "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind), - # Verify the stub could unset it. 
- "send packet: $OK#00", - ], True) - - def add_qSupported_packets(self): - self.test_sequence.add_log_lines( - ["read packet: $qSupported#00", - {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}}, - ], True) - - _KNOWN_QSUPPORTED_STUB_FEATURES = [ - "augmented-libraries-svr4-read", - "PacketSize", - "QStartNoAckMode", - "QThreadSuffixSupported", - "QListThreadsInStopReply", - "qXfer:auxv:read", - "qXfer:libraries:read", - "qXfer:libraries-svr4:read", - ] - - def parse_qSupported_response(self, context): - self.assertIsNotNone(context) - - raw_response = context.get("qSupported_response") - self.assertIsNotNone(raw_response) - - # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the - # +,-,? is stripped from the key and set as the value. - supported_dict = {} - for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response): - key = match.group(1) - val = match.group(3) - - # key=val: store as is - if val and len(val) > 0: - supported_dict[key] = val - else: - if len(key) < 2: - raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}") - supported_type = key[-1] - key = key[:-1] - if not supported_type in ["+", "-", "?"]: - raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type)) - supported_dict[key] = supported_type - # Ensure we know the supported element - if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES: - raise Exception("unknown qSupported stub feature reported: %s" % key) - - return supported_dict - - def run_process_then_stop(self, run_seconds=1): - # Tell the stub to continue. - self.test_sequence.add_log_lines( - ["read packet: $vCont;c#a8"], - True) - context = self.expect_gdbremote_sequence() - - # Wait for run_seconds. - time.sleep(run_seconds) - - # Send an interrupt, capture a T response. 
- self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: {}".format(chr(03)), - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }], - True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - self.assertIsNotNone(context.get("stop_result")) - - return context - - def select_modifiable_register(self, reg_infos): - """Find a register that can be read/written freely.""" - PREFERRED_REGISTER_NAMES = sets.Set(["rax",]) - - # First check for the first register from the preferred register name set. - alternative_register_index = None - - self.assertIsNotNone(reg_infos) - for reg_info in reg_infos: - if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES): - # We found a preferred register. Use it. - return reg_info["lldb_register_index"] - if ("generic" in reg_info) and (reg_info["generic"] == "fp"): - # A frame pointer register will do as a register to modify temporarily. - alternative_register_index = reg_info["lldb_register_index"] - - # We didn't find a preferred register. Return whatever alternative register - # we found, if any. 
- return alternative_register_index - - def extract_registers_from_stop_notification(self, stop_key_vals_text): - self.assertIsNotNone(stop_key_vals_text) - kv_dict = self.parse_key_val_dict(stop_key_vals_text) - - registers = {} - for (key, val) in kv_dict.items(): - if re.match(r"^[0-9a-fA-F]+$", key): - registers[int(key, 16)] = val - return registers - - def gather_register_infos(self): - self.reset_test_sequence() - self.add_register_info_collection_packets() - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - reg_infos = self.parse_register_info_packets(context) - self.assertIsNotNone(reg_infos) - self.add_lldb_register_index(reg_infos) - - return reg_infos - - def find_generic_register_with_name(self, reg_infos, generic_name): - self.assertIsNotNone(reg_infos) - for reg_info in reg_infos: - if ("generic" in reg_info) and (reg_info["generic"] == generic_name): - return reg_info - return None - - def decode_gdbremote_binary(self, encoded_bytes): - decoded_bytes = "" - i = 0 - while i < len(encoded_bytes): - if encoded_bytes[i] == "}": - # Handle escaped char. - self.assertTrue(i + 1 < len(encoded_bytes)) - decoded_bytes += chr(ord(encoded_bytes[i+1]) ^ 0x20) - i +=2 - elif encoded_bytes[i] == "*": - # Handle run length encoding. - self.assertTrue(len(decoded_bytes) > 0) - self.assertTrue(i + 1 < len(encoded_bytes)) - repeat_count = ord(encoded_bytes[i+1]) - 29 - decoded_bytes += decoded_bytes[-1] * repeat_count - i += 2 - else: - decoded_bytes += encoded_bytes[i] - i += 1 - return decoded_bytes - - def build_auxv_dict(self, endian, word_size, auxv_data): - self.assertIsNotNone(endian) - self.assertIsNotNone(word_size) - self.assertIsNotNone(auxv_data) - - auxv_dict = {} - - while len(auxv_data) > 0: - # Chop off key. - raw_key = auxv_data[:word_size] - auxv_data = auxv_data[word_size:] - - # Chop of value. - raw_value = auxv_data[:word_size] - auxv_data = auxv_data[word_size:] - - # Convert raw text from target endian. 
- key = unpack_endian_binary_string(endian, raw_key) - value = unpack_endian_binary_string(endian, raw_value) - - # Handle ending entry. - if key == 0: - self.assertEquals(value, 0) - return auxv_dict - - # The key should not already be present. - self.assertFalse(key in auxv_dict) - auxv_dict[key] = value - - self.fail("should not reach here - implies required double zero entry not found") - return auxv_dict - - def read_binary_data_in_chunks(self, command_prefix, chunk_length): - """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned.""" - offset = 0 - done = False - decoded_data = "" - - while not done: - # Grab the next iteration of data. - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - "read packet: ${}{:x},{:x}:#00".format(command_prefix, offset, chunk_length), - {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} } - ], True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - response_type = context.get("response_type") - self.assertIsNotNone(response_type) - self.assertTrue(response_type in ["l", "m"]) - - # Move offset along. - offset += chunk_length - - # Figure out if we're done. We're done if the response type is l. - done = response_type == "l" - - # Decode binary data. - content_raw = context.get("content_raw") - if content_raw and len(content_raw) > 0: - self.assertIsNotNone(content_raw) - decoded_data += self.decode_gdbremote_binary(content_raw) - return decoded_data - - def add_interrupt_packets(self): - self.test_sequence.add_log_lines([ - # Send the intterupt. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. 
- {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } }, - ], True) - - def parse_interrupt_packets(self, context): - self.assertIsNotNone(context.get("stop_signo")) - self.assertIsNotNone(context.get("stop_key_val_text")) - return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"])) - - def add_QSaveRegisterState_packets(self, thread_id): - if thread_id: - # Use the thread suffix form. - request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id) - else: - request = "read packet: $QSaveRegisterState#00" - - self.test_sequence.add_log_lines([ - request, - {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } }, - ], True) - - def parse_QSaveRegisterState_response(self, context): - self.assertIsNotNone(context) - - save_response = context.get("save_response") - self.assertIsNotNone(save_response) - - if len(save_response) < 1 or save_response[0] == "E": - # error received - return (False, None) - else: - return (True, int(save_response)) - - def add_QRestoreRegisterState_packets(self, save_id, thread_id=None): - if thread_id: - # Use the thread suffix form. - request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id) - else: - request = "read packet: $QRestoreRegisterState:{}#00".format(save_id) - - self.test_sequence.add_log_lines([ - request, - "send packet: $OK#00" - ], True) - - def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None): - self.assertIsNotNone(reg_infos) - - successful_writes = 0 - failed_writes = 0 - - for reg_info in reg_infos: - # Use the lldb register index added to the reg info. We're not necessarily - # working off a full set of register infos, so an inferred register index could be wrong. 
- reg_index = reg_info["lldb_register_index"] - self.assertIsNotNone(reg_index) - - reg_byte_size = int(reg_info["bitsize"])/8 - self.assertTrue(reg_byte_size > 0) - - # Handle thread suffix. - if thread_id: - p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id) - else: - p_request = "read packet: $p{:x}#00".format(reg_index) - - # Read the existing value. - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - p_request, - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify the response length. - p_response = context.get("p_response") - self.assertIsNotNone(p_response) - initial_reg_value = unpack_register_hex_unsigned(endian, p_response) - - # Flip the value by xoring with all 1s - all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8) - flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16) - # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int) - - # Handle thread suffix for P. - if thread_id: - P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id) - else: - P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size)) - - # Write the flipped value to the register. - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - P_request, - { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail - # (e.g. flags, segment selectors, etc.) due to register value restrictions. 
Don't worry about them - # all flipping perfectly. - P_response = context.get("P_response") - self.assertIsNotNone(P_response) - if P_response == "OK": - successful_writes += 1 - else: - failed_writes += 1 - # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response) - - # Read back the register value, ensure it matches the flipped value. - if P_response == "OK": - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - p_request, - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - verify_p_response_raw = context.get("p_response") - self.assertIsNotNone(verify_p_response_raw) - verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw) - - if verify_bits != flipped_bits_int: - # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts. - # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits) - successful_writes -= 1 - failed_writes +=1 - - return (successful_writes, failed_writes) - - def is_bit_flippable_register(self, reg_info): - if not reg_info: - return False - if not "set" in reg_info: - return False - if reg_info["set"] != "General Purpose Registers": - return False - if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0): - # Don't try to bit flip registers contained in another register. - return False - if re.match("^.s$", reg_info["name"]): - # This is a 2-letter register name that ends in "s", like a segment register. - # Don't try to bit flip these. - return False - # Okay, this looks fine-enough. 
- return True - - def read_register_values(self, reg_infos, endian, thread_id=None): - self.assertIsNotNone(reg_infos) - values = {} - - for reg_info in reg_infos: - # We append a register index when load reg infos so we can work with subsets. - reg_index = reg_info.get("lldb_register_index") - self.assertIsNotNone(reg_index) - - # Handle thread suffix. - if thread_id: - p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id) - else: - p_request = "read packet: $p{:x}#00".format(reg_index) - - # Read it with p. - self.reset_test_sequence() - self.test_sequence.add_log_lines([ - p_request, - { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Convert value from target endian to integral. - p_response = context.get("p_response") - self.assertIsNotNone(p_response) - self.assertTrue(len(p_response) > 0) - self.assertFalse(p_response[0] == "E") - - values[reg_index] = unpack_register_hex_unsigned(endian, p_response) - - return values - - def add_vCont_query_packets(self): - self.test_sequence.add_log_lines([ - "read packet: $vCont?#49", - {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } }, - ], True) - - def parse_vCont_query_response(self, context): - self.assertIsNotNone(context) - vCont_query_response = context.get("vCont_query_response") - - # Handle case of no vCont support at all - in which case the capture group will be none or zero length. 
- if not vCont_query_response or len(vCont_query_response) == 0: - return {} - - return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0} - - def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"): - """Used by single step test that appears in a few different contexts.""" - single_step_count = 0 - - while single_step_count < max_step_count: - self.assertIsNotNone(thread_id) - - # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id. - step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction)) - # print "\nstep_packet created: {}\n".format(step_packet) - - # Single step. - self.reset_test_sequence() - if use_Hc_packet: - self.test_sequence.add_log_lines( - [# Set the continue thread. - "read packet: $Hc{0:x}#00".format(thread_id), - "send packet: $OK#00", - ], True) - self.test_sequence.add_log_lines([ - # Single step. - step_packet, - # "read packet: $vCont;s:{0:x}#00".format(thread_id), - # Expect a breakpoint stop report. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, - ], True) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - self.assertIsNotNone(context.get("stop_signo")) - self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP) - - single_step_count += 1 - - # See if the predicate is true. If so, we're done. - if predicate(args): - return (True, single_step_count) - - # The predicate didn't return true within the runaway step count. 
- return (False, single_step_count) - - def g_c1_c2_contents_are(self, args): - """Used by single step test that appears in a few different contexts.""" - g_c1_address = args["g_c1_address"] - g_c2_address = args["g_c2_address"] - expected_g_c1 = args["expected_g_c1"] - expected_g_c2 = args["expected_g_c2"] - - # Read g_c1 and g_c2 contents. - self.reset_test_sequence() - self.test_sequence.add_log_lines( - ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1), - {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c1_contents"} }, - "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1), - {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"g_c2_contents"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Check if what we read from inferior memory is what we are expecting. - self.assertIsNotNone(context.get("g_c1_contents")) - self.assertIsNotNone(context.get("g_c2_contents")) - - return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (context.get("g_c2_contents").decode("hex") == expected_g_c2) - - def single_step_only_steps_one_instruction(self, use_Hc_packet=True, step_instruction="s"): - """Used by single step test that appears in a few different contexts.""" - # Start up the inferior. - procs = self.prep_debug_monitor_and_inferior( - inferior_args=["get-code-address-hex:swap_chars", "get-data-address-hex:g_c1", "get-data-address-hex:g_c2", "sleep:1", "call-function:swap_chars", "sleep:5"]) - - # Run the process - self.test_sequence.add_log_lines( - [# Start running after initial stop. - "read packet: $c#63", - # Match output line that prints the memory address of the function call entry point. - # Note we require launch-only testing so we can get inferior otuput. 
- { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$", - "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} }, - # Now stop the inferior. - "read packet: {}".format(chr(03)), - # And wait for the stop notification. - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], - True) - - # Run the packet stream. - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Grab the main thread id. - self.assertIsNotNone(context.get("stop_thread_id")) - main_thread_id = int(context.get("stop_thread_id"), 16) - - # Grab the function address. - self.assertIsNotNone(context.get("function_address")) - function_address = int(context.get("function_address"), 16) - - # Grab the data addresses. - self.assertIsNotNone(context.get("g_c1_address")) - g_c1_address = int(context.get("g_c1_address"), 16) - - self.assertIsNotNone(context.get("g_c2_address")) - g_c2_address = int(context.get("g_c2_address"), 16) - - # Set a breakpoint at the given address. - # Note this might need to be switched per platform (ARM, mips, etc.). - BREAKPOINT_KIND = 1 - self.reset_test_sequence() - self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Remove the breakpoint. - self.reset_test_sequence() - self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND) - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - # Verify g_c1 and g_c2 match expected initial state. 
- args = {} - args["g_c1_address"] = g_c1_address - args["g_c2_address"] = g_c2_address - args["expected_g_c1"] = "0" - args["expected_g_c2"] = "1" - - self.assertTrue(self.g_c1_c2_contents_are(args)) - - # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code. - args["expected_g_c1"] = "1" - args["expected_g_c2"] = "1" - (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) - self.assertTrue(state_reached) - - # Verify we hit the next state. - args["expected_g_c1"] = "1" - args["expected_g_c2"] = "0" - (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) - self.assertTrue(state_reached) - self.assertEquals(step_count, 1) - - # Verify we hit the next state. - args["expected_g_c1"] = "0" - args["expected_g_c2"] = "0" - (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) - self.assertTrue(state_reached) - self.assertEquals(step_count, 1) - - # Verify we hit the next state. 
- args["expected_g_c1"] = "0" - args["expected_g_c2"] = "1" - (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) - self.assertTrue(state_reached) - self.assertEquals(step_count, 1) - Index: test/tools/lldb-gdbserver/inferior-crash/Makefile =================================================================== --- test/tools/lldb-gdbserver/inferior-crash/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -LEVEL = ../../../make - -CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS -std=c++11 -# LD_EXTRAS := -lpthread -CXX_SOURCES := main.cpp -MAKE_DSYM :=NO - -include $(LEVEL)/Makefile.rules Index: test/tools/lldb-gdbserver/inferior-crash/TestGdbRemoteAbort.py =================================================================== --- test/tools/lldb-gdbserver/inferior-crash/TestGdbRemoteAbort.py +++ /dev/null @@ -1,45 +0,0 @@ -import unittest2 - -# Add the directory above ours to the python library path since we -# will import from there. 
-import os.path -import sys -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) - -import gdbremote_testcase -import signal -from lldbtest import * - -class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase): - mydir = TestBase.compute_mydir(__file__) - - def inferior_abort_received(self): - procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"]) - self.assertIsNotNone(procs) - - self.test_sequence.add_log_lines([ - "read packet: $vCont;c#a8", - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", "capture":{ 1:"hex_exit_code"} }, - ], True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - hex_exit_code = context.get("hex_exit_code") - self.assertIsNotNone(hex_exit_code) - self.assertEquals(int(hex_exit_code, 16), signal.SIGABRT) - - @debugserver_test - @dsym_test - def test_inferior_abort_received_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.inferior_abort_received() - - @llgs_test - @dwarf_test - def test_inferior_abort_received_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.inferior_abort_received() - Index: test/tools/lldb-gdbserver/inferior-crash/TestGdbRemoteSegFault.py =================================================================== --- test/tools/lldb-gdbserver/inferior-crash/TestGdbRemoteSegFault.py +++ /dev/null @@ -1,46 +0,0 @@ -import unittest2 - -# Add the directory above ours to the python library path since we -# will import from there. 
-import os.path -import sys -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) - -import gdbremote_testcase -import signal -from lldbtest import * - -class TestGdbRemoteSegFault(gdbremote_testcase.GdbRemoteTestCaseBase): - mydir = TestBase.compute_mydir(__file__) - - GDB_REMOTE_STOP_CODE_BAD_ACCESS = 0x91 - - def inferior_seg_fault_received(self, expected_signo): - procs = self.prep_debug_monitor_and_inferior(inferior_args=["segfault"]) - self.assertIsNotNone(procs) - - self.test_sequence.add_log_lines([ - "read packet: $vCont;c#a8", - {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", "capture":{ 1:"hex_exit_code"} }, - ], True) - - context = self.expect_gdbremote_sequence() - self.assertIsNotNone(context) - - hex_exit_code = context.get("hex_exit_code") - self.assertIsNotNone(hex_exit_code) - self.assertEquals(int(hex_exit_code, 16), expected_signo) - - @debugserver_test - @dsym_test - def test_inferior_seg_fault_received_debugserver_dsym(self): - self.init_debugserver_test() - self.buildDsym() - self.inferior_seg_fault_received(self.GDB_REMOTE_STOP_CODE_BAD_ACCESS) - - @llgs_test - @dwarf_test - def test_inferior_seg_fault_received_llgs_dwarf(self): - self.init_llgs_test() - self.buildDwarf() - self.inferior_seg_fault_received(signal.SIGSEGV) Index: test/tools/lldb-gdbserver/inferior-crash/main.cpp =================================================================== --- test/tools/lldb-gdbserver/inferior-crash/main.cpp +++ /dev/null @@ -1,39 +0,0 @@ -#include -#include -#include - -namespace -{ - const char *const SEGFAULT_COMMAND = "segfault"; - const char *const ABORT_COMMAND = "abort"; -} - -int main (int argc, char **argv) -{ - if (argc < 2) - { - std::cout << "expected at least one command provided on the command line" << std::endl; - } - - // Process command line args. 
- for (int i = 1; i < argc; ++i) - { - const char *const command = argv[i]; - if (std::strstr (command, SEGFAULT_COMMAND)) - { - // Perform a null pointer access. - int *const null_int_ptr = nullptr; - *null_int_ptr = 0xDEAD; - } - else if (std::strstr (command, ABORT_COMMAND)) - { - std::abort(); - } - else - { - std::cout << "Unsupported command: " << command << std::endl; - } - } - - return 0; -} Index: test/tools/lldb-gdbserver/lldbgdbserverutils.py =================================================================== --- test/tools/lldb-gdbserver/lldbgdbserverutils.py +++ /dev/null @@ -1,838 +0,0 @@ -"""Module for supporting unit testing of the lldb-gdbserver debug monitor exe. -""" - -import os -import os.path -import platform -import Queue -import re -import socket_packet_pump -import subprocess -import time - -def _get_debug_monitor_from_lldb(lldb_exe, debug_monitor_basename): - """Return the debug monitor exe path given the lldb exe path. - - This method attempts to construct a valid debug monitor exe name - from a given lldb exe name. It will return None if the synthesized - debug monitor name is not found to exist. - - The debug monitor exe path is synthesized by taking the directory - of the lldb exe, and replacing the portion of the base name that - matches "lldb" (case insensitive) and replacing with the value of - debug_monitor_basename. - - Args: - lldb_exe: the path to an lldb executable. - - debug_monitor_basename: the base name portion of the debug monitor - that will replace 'lldb'. - - Returns: - A path to the debug monitor exe if it is found to exist; otherwise, - returns None. - - """ - - exe_dir = os.path.dirname(lldb_exe) - exe_base = os.path.basename(lldb_exe) - - # we'll rebuild the filename by replacing lldb with - # the debug monitor basename, keeping any prefix or suffix in place. 
- regex = re.compile(r"lldb", re.IGNORECASE) - new_base = regex.sub(debug_monitor_basename, exe_base) - - debug_monitor_exe = os.path.join(exe_dir, new_base) - if os.path.exists(debug_monitor_exe): - return debug_monitor_exe - else: - return None - - -def get_lldb_gdbserver_exe(): - """Return the lldb-gdbserver exe path. - - Returns: - A path to the lldb-gdbserver exe if it is found to exist; otherwise, - returns None. - """ - if "LLDB_DEBUGSERVER_PATH" in os.environ: - return os.environ["LLDB_DEBUGSERVER_PATH"] - elif "LLDB_EXEC" in os.environ: - lldb_exe = os.environ["LLDB_EXEC"] - if not lldb_exe: - return None - else: - return _get_debug_monitor_from_lldb(lldb_exe, "lldb-server") - else: - return None - -def get_debugserver_exe(): - """Return the debugserver exe path. - - Returns: - A path to the debugserver exe if it is found to exist; otherwise, - returns None. - """ - if "LLDB_DEBUGSERVER_PATH" in os.environ: - return os.environ["LLDB_DEBUGSERVER_PATH"] - elif "LLDB_EXEC" in os.environ: - lldb_exe = os.environ["LLDB_EXEC"] - if not lldb_exe: - return None - else: - return _get_debug_monitor_from_lldb(lldb_exe, "debugserver") - else: - return None - - -_LOG_LINE_REGEX = re.compile(r'^(lldb-gdbserver|debugserver)\s+<\s*(\d+)>' + - '\s+(read|send)\s+packet:\s+(.+)$') - - -def _is_packet_lldb_gdbserver_input(packet_type, llgs_input_is_read): - """Return whether a given packet is input for lldb-gdbserver. - - Args: - packet_type: a string indicating 'send' or 'receive', from a - gdbremote packet protocol log. - - llgs_input_is_read: true if lldb-gdbserver input (content sent to - lldb-gdbserver) is listed as 'read' or 'send' in the packet - log entry. - - Returns: - True if the packet should be considered input for lldb-gdbserver; False - otherwise. - """ - if packet_type == 'read': - # when llgs is the read side, then a read packet is meant for - # input to llgs (when captured from the llgs/debugserver exe). 
- return llgs_input_is_read - elif packet_type == 'send': - # when llgs is the send side, then a send packet is meant to - # be input to llgs (when captured from the lldb exe). - return not llgs_input_is_read - else: - # don't understand what type of packet this is - raise "Unknown packet type: {}".format(packet_type) - - -def handle_O_packet(context, packet_contents, logger): - """Handle O packets.""" - if (not packet_contents) or (len(packet_contents) < 1): - return False - elif packet_contents[0] != "O": - return False - elif packet_contents == "OK": - return False - - new_text = gdbremote_hex_decode_string(packet_contents[1:]) - context["O_content"] += new_text - context["O_count"] += 1 - - if logger: - logger.debug("text: new \"{}\", cumulative: \"{}\"".format(new_text, context["O_content"])) - - return True - -_STRIP_CHECKSUM_REGEX = re.compile(r'#[0-9a-fA-F]{2}$') -_STRIP_COMMAND_PREFIX_REGEX = re.compile(r"^\$") -_STRIP_COMMAND_PREFIX_M_REGEX = re.compile(r"^\$m") - - -def assert_packets_equal(asserter, actual_packet, expected_packet): - # strip off the checksum digits of the packet. When we're in - # no-ack mode, the # checksum is ignored, and should not be cause - # for a mismatched packet. - actual_stripped = _STRIP_CHECKSUM_REGEX.sub('', actual_packet) - expected_stripped = _STRIP_CHECKSUM_REGEX.sub('', expected_packet) - asserter.assertEqual(actual_stripped, expected_stripped) - -def expect_lldb_gdbserver_replay( - asserter, - sock, - test_sequence, - timeout_seconds, - logger=None): - """Replay socket communication with lldb-gdbserver and verify responses. - - Args: - asserter: the object providing assertEqual(first, second, msg=None), e.g. TestCase instance. - - sock: the TCP socket connected to the lldb-gdbserver exe. - - test_sequence: a GdbRemoteTestSequence instance that describes - the messages sent to the gdb remote and the responses - expected from it. 
- - timeout_seconds: any response taking more than this number of - seconds will cause an exception to be raised. - - logger: a Python logger instance. - - Returns: - The context dictionary from running the given gdbremote - protocol sequence. This will contain any of the capture - elements specified to any GdbRemoteEntry instances in - test_sequence. - - The context will also contain an entry, context["O_content"] - which contains the text from the inferior received via $O - packets. $O packets should not attempt to be matched - directly since they are not entirely deterministic as to - how many arrive and how much text is in each one. - - context["O_count"] will contain an integer of the number of - O packets received. - """ - - # Ensure we have some work to do. - if len(test_sequence.entries) < 1: - return {} - - context = {"O_count":0, "O_content":""} - with socket_packet_pump.SocketPacketPump(sock, logger) as pump: - # Grab the first sequence entry. - sequence_entry = test_sequence.entries.pop(0) - - # While we have an active sequence entry, send messages - # destined for the stub and collect/match/process responses - # expected from the stub. - while sequence_entry: - if sequence_entry.is_send_to_remote(): - # This is an entry to send to the remote debug monitor. - send_packet = sequence_entry.get_send_packet() - if logger: - if len(send_packet) == 1 and send_packet[0] == chr(3): - packet_desc = "^C" - else: - packet_desc = send_packet - logger.info("sending packet to remote: {}".format(packet_desc)) - sock.sendall(send_packet) - else: - # This is an entry expecting to receive content from the remote debug monitor. - - # We'll pull from (and wait on) the queue appropriate for the type of matcher. - # We keep separate queues for process output (coming from non-deterministic - # $O packet division) and for all other packets. - if sequence_entry.is_output_matcher(): - try: - # Grab next entry from the output queue. 
- content = pump.output_queue().get(True, timeout_seconds) - except Queue.Empty: - if logger: - logger.warning("timeout waiting for stub output (accumulated output:{})".format(pump.get_accumulated_output())) - raise Exception("timed out while waiting for output match (accumulated output: {})".format(pump.get_accumulated_output())) - else: - try: - content = pump.packet_queue().get(True, timeout_seconds) - except Queue.Empty: - if logger: - logger.warning("timeout waiting for packet match (receive buffer: {})".format(pump.get_receive_buffer())) - raise Exception("timed out while waiting for packet match (receive buffer: {})".format(pump.get_receive_buffer())) - - # Give the sequence entry the opportunity to match the content. - # Output matchers might match or pass after more output accumulates. - # Other packet types generally must match. - asserter.assertIsNotNone(content) - context = sequence_entry.assert_match(asserter, content, context=context) - - # Move on to next sequence entry as needed. Some sequence entries support executing multiple - # times in different states (for looping over query/response packets). - if sequence_entry.is_consumed(): - if len(test_sequence.entries) > 0: - sequence_entry = test_sequence.entries.pop(0) - else: - sequence_entry = None - - # Fill in the O_content entries. - context["O_count"] = 1 - context["O_content"] = pump.get_accumulated_output() - - return context - -def gdbremote_hex_encode_string(str): - output = '' - for c in str: - output += '{0:02x}'.format(ord(c)) - return output - -def gdbremote_hex_decode_string(str): - return str.decode("hex") - -def gdbremote_packet_encode_string(str): - checksum = 0 - for c in str: - checksum += ord(c) - return '$' + str + '#{0:02x}'.format(checksum % 256) - -def build_gdbremote_A_packet(args_list): - """Given a list of args, create a properly-formed $A packet containing each arg. 
- """ - payload = "A" - - # build the arg content - arg_index = 0 - for arg in args_list: - # Comma-separate the args. - if arg_index > 0: - payload += ',' - - # Hex-encode the arg. - hex_arg = gdbremote_hex_encode_string(arg) - - # Build the A entry. - payload += "{},{},{}".format(len(hex_arg), arg_index, hex_arg) - - # Next arg index, please. - arg_index += 1 - - # return the packetized payload - return gdbremote_packet_encode_string(payload) - - -def parse_reg_info_response(response_packet): - if not response_packet: - raise Exception("response_packet cannot be None") - - # Strip off prefix $ and suffix #xx if present. - response_packet = _STRIP_COMMAND_PREFIX_REGEX.sub("", response_packet) - response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) - - # Build keyval pairs - values = {} - for kv in response_packet.split(";"): - if len(kv) < 1: - continue - (key, val) = kv.split(':') - values[key] = val - - return values - - -def parse_threadinfo_response(response_packet): - if not response_packet: - raise Exception("response_packet cannot be None") - - # Strip off prefix $ and suffix #xx if present. - response_packet = _STRIP_COMMAND_PREFIX_M_REGEX.sub("", response_packet) - response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) - - # Return list of thread ids - return [int(thread_id_hex,16) for thread_id_hex in response_packet.split(",") if len(thread_id_hex) > 0] - -def unpack_endian_binary_string(endian, value_string): - """Unpack a gdb-remote binary (post-unescaped, i.e. 
not escaped) response to an unsigned int given endianness of the inferior.""" - if not endian: - raise Exception("endian cannot be None") - if not value_string or len(value_string) < 1: - raise Exception("value_string cannot be None or empty") - - if endian == 'little': - value = 0 - i = 0 - while len(value_string) > 0: - value += (ord(value_string[0]) << i) - value_string = value_string[1:] - i += 8 - return value - elif endian == 'big': - value = 0 - while len(value_string) > 0: - value = (value << 8) + ord(value_string[0]) - value_string = value_string[1:] - return value - else: - # pdp is valid but need to add parse code once needed. - raise Exception("unsupported endian:{}".format(endian)) - -def unpack_register_hex_unsigned(endian, value_string): - """Unpack a gdb-remote $p-style response to an unsigned int given endianness of inferior.""" - if not endian: - raise Exception("endian cannot be None") - if not value_string or len(value_string) < 1: - raise Exception("value_string cannot be None or empty") - - if endian == 'little': - value = 0 - i = 0 - while len(value_string) > 0: - value += (int(value_string[0:2], 16) << i) - value_string = value_string[2:] - i += 8 - return value - elif endian == 'big': - return int(value_string, 16) - else: - # pdp is valid but need to add parse code once needed. - raise Exception("unsupported endian:{}".format(endian)) - -def pack_register_hex(endian, value, byte_size=None): - """Unpack a gdb-remote $p-style response to an unsigned int given endianness of inferior.""" - if not endian: - raise Exception("endian cannot be None") - - if endian == 'little': - # Create the litt-endian return value. - retval = "" - while value != 0: - retval = retval + "{:02x}".format(value & 0xff) - value = value >> 8 - if byte_size: - # Add zero-fill to the right/end (MSB side) of the value. 
- retval += "00" * (byte_size - len(retval)/2) - return retval - - elif endian == 'big': - retval = value.encode("hex") - if byte_size: - # Add zero-fill to the left/front (MSB side) of the value. - retval = ("00" * (byte_size - len(retval)/2)) + retval - return retval - - else: - # pdp is valid but need to add parse code once needed. - raise Exception("unsupported endian:{}".format(endian)) - -class GdbRemoteEntryBase(object): - def is_output_matcher(self): - return False - -class GdbRemoteEntry(GdbRemoteEntryBase): - - def __init__(self, is_send_to_remote=True, exact_payload=None, regex=None, capture=None, expect_captures=None): - """Create an entry representing one piece of the I/O to/from a gdb remote debug monitor. - - Args: - - is_send_to_remote: True if this entry is a message to be - sent to the gdbremote debug monitor; False if this - entry represents text to be matched against the reply - from the gdbremote debug monitor. - - exact_payload: if not None, then this packet is an exact - send (when sending to the remote) or an exact match of - the response from the gdbremote. The checksums are - ignored on exact match requests since negotiation of - no-ack makes the checksum content essentially - undefined. - - regex: currently only valid for receives from gdbremote. - When specified (and only if exact_payload is None), - indicates the gdbremote response must match the given - regex. Match groups in the regex can be used for two - different purposes: saving the match (see capture - arg), or validating that a match group matches a - previously established value (see expect_captures). It - is perfectly valid to have just a regex arg and to - specify neither capture or expect_captures args. This - arg only makes sense if exact_payload is not - specified. - - capture: if specified, is a dictionary of regex match - group indices (should start with 1) to variable names - that will store the capture group indicated by the - index. 
For example, {1:"thread_id"} will store capture - group 1's content in the context dictionary where - "thread_id" is the key and the match group value is - the value. The value stored off can be used later in a - expect_captures expression. This arg only makes sense - when regex is specified. - - expect_captures: if specified, is a dictionary of regex - match group indices (should start with 1) to variable - names, where the match group should match the value - existing in the context at the given variable name. - For example, {2:"thread_id"} indicates that the second - match group must match the value stored under the - context's previously stored "thread_id" key. This arg - only makes sense when regex is specified. - """ - self._is_send_to_remote = is_send_to_remote - self.exact_payload = exact_payload - self.regex = regex - self.capture = capture - self.expect_captures = expect_captures - - def is_send_to_remote(self): - return self._is_send_to_remote - - def is_consumed(self): - # For now, all packets are consumed after first use. - return True - - def get_send_packet(self): - if not self.is_send_to_remote(): - raise Exception("get_send_packet() called on GdbRemoteEntry that is not a send-to-remote packet") - if not self.exact_payload: - raise Exception("get_send_packet() called on GdbRemoteEntry but it doesn't have an exact payload") - return self.exact_payload - - def _assert_exact_payload_match(self, asserter, actual_packet): - assert_packets_equal(asserter, actual_packet, self.exact_payload) - return None - - def _assert_regex_match(self, asserter, actual_packet, context): - # Ensure the actual packet matches from the start of the actual packet. - match = self.regex.match(actual_packet) - if not match: - asserter.fail("regex '{}' failed to match against content '{}'".format(self.regex.pattern, actual_packet)) - - if self.capture: - # Handle captures. 
- for group_index, var_name in self.capture.items(): - capture_text = match.group(group_index) - # It is okay for capture text to be None - which it will be if it is a group that can match nothing. - # The user must be okay with it since the regex itself matched above. - context[var_name] = capture_text - - if self.expect_captures: - # Handle comparing matched groups to context dictionary entries. - for group_index, var_name in self.expect_captures.items(): - capture_text = match.group(group_index) - if not capture_text: - raise Exception("No content to expect for group index {}".format(group_index)) - asserter.assertEquals(capture_text, context[var_name]) - - return context - - def assert_match(self, asserter, actual_packet, context=None): - # This only makes sense for matching lines coming from the - # remote debug monitor. - if self.is_send_to_remote(): - raise Exception("Attempted to match a packet being sent to the remote debug monitor, doesn't make sense.") - - # Create a new context if needed. - if not context: - context = {} - - # If this is an exact payload, ensure they match exactly, - # ignoring the packet checksum which is optional for no-ack - # mode. - if self.exact_payload: - self._assert_exact_payload_match(asserter, actual_packet) - return context - elif self.regex: - return self._assert_regex_match(asserter, actual_packet, context) - else: - raise Exception("Don't know how to match a remote-sent packet when exact_payload isn't specified.") - -class MultiResponseGdbRemoteEntry(GdbRemoteEntryBase): - """Represents a query/response style packet. - - Assumes the first item is sent to the gdb remote. - An end sequence regex indicates the end of the query/response - packet sequence. All responses up through (but not including) the - end response are stored in a context variable. - - Settings accepted from params: - - next_query or query: required. The typical query packet without the $ prefix or #xx suffix. 
- If there is a special first packet to start the iteration query, see the - first_query key. - - first_query: optional. If the first query requires a special query command, specify - it with this key. Do not specify the $ prefix or #xx suffix. - - append_iteration_suffix: defaults to False. Specify True if the 0-based iteration - index should be appended as a suffix to the command. e.g. qRegisterInfo with - this key set true will generate query packets of qRegisterInfo0, qRegisterInfo1, - etc. - - end_regex: required. Specifies a compiled regex object that will match the full text - of any response that signals an end to the iteration. It must include the - initial $ and ending #xx and must match the whole packet. - - save_key: required. Specifies the key within the context where an array will be stored. - Each packet received from the gdb remote that does not match the end_regex will get - appended to the array stored within the context at that key. - - runaway_response_count: optional. Defaults to 10000. If this many responses are retrieved, - assume there is something wrong with either the response collection or the ending - detection regex and throw an exception. 
- """ - def __init__(self, params): - self._next_query = params.get("next_query", params.get("query")) - if not self._next_query: - raise "either next_query or query key must be specified for MultiResponseGdbRemoteEntry" - - self._first_query = params.get("first_query", self._next_query) - self._append_iteration_suffix = params.get("append_iteration_suffix", False) - self._iteration = 0 - self._end_regex = params["end_regex"] - self._save_key = params["save_key"] - self._runaway_response_count = params.get("runaway_response_count", 10000) - self._is_send_to_remote = True - self._end_matched = False - - def is_send_to_remote(self): - return self._is_send_to_remote - - def get_send_packet(self): - if not self.is_send_to_remote(): - raise Exception("get_send_packet() called on MultiResponseGdbRemoteEntry that is not in the send state") - if self._end_matched: - raise Exception("get_send_packet() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.") - - # Choose the first or next query for the base payload. - if self._iteration == 0 and self._first_query: - payload = self._first_query - else: - payload = self._next_query - - # Append the suffix as needed. - if self._append_iteration_suffix: - payload += "%x" % self._iteration - - # Keep track of the iteration. - self._iteration += 1 - - # Now that we've given the query packet, flip the mode to receive/match. - self._is_send_to_remote = False - - # Return the result, converted to packet form. - return gdbremote_packet_encode_string(payload) - - def is_consumed(self): - return self._end_matched - - def assert_match(self, asserter, actual_packet, context=None): - # This only makes sense for matching lines coming from the remote debug monitor. 
- if self.is_send_to_remote(): - raise Exception("assert_match() called on MultiResponseGdbRemoteEntry but state is set to send a query packet.") - - if self._end_matched: - raise Exception("assert_match() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.") - - # Set up a context as needed. - if not context: - context = {} - - # Check if the packet matches the end condition. - match = self._end_regex.match(actual_packet) - if match: - # We're done iterating. - self._end_matched = True - return context - - # Not done iterating - save the packet. - context[self._save_key] = context.get(self._save_key, []) - context[self._save_key].append(actual_packet) - - # Check for a runaway response cycle. - if len(context[self._save_key]) >= self._runaway_response_count: - raise Exception("runaway query/response cycle detected: %d responses captured so far. Last response: %s" % - (len(context[self._save_key]), context[self._save_key][-1])) - - # Flip the mode to send for generating the query. - self._is_send_to_remote = True - return context - -class MatchRemoteOutputEntry(GdbRemoteEntryBase): - """Waits for output from the debug monitor to match a regex or time out. - - This entry type tries to match each time new gdb remote output is accumulated - using a provided regex. If the output does not match the regex within the - given timeframe, the command fails the playback session. If the regex does - match, any capture fields are recorded in the context. - - Settings accepted from params: - - regex: required. Specifies a compiled regex object that must either succeed - with re.match or re.search (see regex_mode below) within the given timeout - (see timeout_seconds below) or cause the playback to fail. - - regex_mode: optional. Available values: "match" or "search". If "match", the entire - stub output as collected so far must match the regex. 
If search, then the regex - must match starting somewhere within the output text accumulated thus far. - Default: "match" (i.e. the regex must match the entirety of the accumulated output - buffer, so unexpected text will generally fail the match). - - capture: optional. If specified, is a dictionary of regex match group indices (should start - with 1) to variable names that will store the capture group indicated by the - index. For example, {1:"thread_id"} will store capture group 1's content in the - context dictionary where "thread_id" is the key and the match group value is - the value. The value stored off can be used later in a expect_captures expression. - This arg only makes sense when regex is specified. - """ - def __init__(self, regex=None, regex_mode="match", capture=None): - self._regex = regex - self._regex_mode = regex_mode - self._capture = capture - self._matched = False - - if not self._regex: - raise Exception("regex cannot be None") - - if not self._regex_mode in ["match", "search"]: - raise Exception("unsupported regex mode \"{}\": must be \"match\" or \"search\"".format(self._regex_mode)) - - def is_output_matcher(self): - return True - - def is_send_to_remote(self): - # This is always a "wait for remote" command. - return False - - def is_consumed(self): - return self._matched - - def assert_match(self, asserter, accumulated_output, context): - # Validate args. - if not accumulated_output: - raise Exception("accumulated_output cannot be none") - if not context: - raise Exception("context cannot be none") - - # Validate that we haven't already matched. - if self._matched: - raise Exception("invalid state - already matched, attempting to match again") - - # If we don't have any content yet, we don't match. 
- if len(accumulated_output) < 1: - return context - - # Check if we match - if self._regex_mode == "match": - match = self._regex.match(accumulated_output) - elif self._regex_mode == "search": - match = self._regex.search(accumulated_output) - else: - raise Exception("Unexpected regex mode: {}".format(self._regex_mode)) - - # If we don't match, wait to try again after next $O content, or time out. - if not match: - # print "re pattern \"{}\" did not match against \"{}\"".format(self._regex.pattern, accumulated_output) - return context - - # We do match. - self._matched = True - # print "re pattern \"{}\" matched against \"{}\"".format(self._regex.pattern, accumulated_output) - - # Collect up any captures into the context. - if self._capture: - # Handle captures. - for group_index, var_name in self._capture.items(): - capture_text = match.group(group_index) - if not capture_text: - raise Exception("No content for group index {}".format(group_index)) - context[var_name] = capture_text - - return context - - -class GdbRemoteTestSequence(object): - - _LOG_LINE_REGEX = re.compile(r'^.*(read|send)\s+packet:\s+(.+)$') - - def __init__(self, logger): - self.entries = [] - self.logger = logger - - def add_log_lines(self, log_lines, remote_input_is_read): - for line in log_lines: - if type(line) == str: - # Handle log line import - # if self.logger: - # self.logger.debug("processing log line: {}".format(line)) - match = self._LOG_LINE_REGEX.match(line) - if match: - playback_packet = match.group(2) - direction = match.group(1) - if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read): - # Handle as something to send to the remote debug monitor. - # if self.logger: - # self.logger.info("processed packet to send to remote: {}".format(playback_packet)) - self.entries.append(GdbRemoteEntry(is_send_to_remote=True, exact_payload=playback_packet)) - else: - # Log line represents content to be expected from the remote debug monitor. 
- # if self.logger: - # self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet)) - self.entries.append(GdbRemoteEntry(is_send_to_remote=False,exact_payload=playback_packet)) - else: - raise Exception("failed to interpret log line: {}".format(line)) - elif type(line) == dict: - entry_type = line.get("type", "regex_capture") - if entry_type == "regex_capture": - # Handle more explicit control over details via dictionary. - direction = line.get("direction", None) - regex = line.get("regex", None) - capture = line.get("capture", None) - expect_captures = line.get("expect_captures", None) - - # Compile the regex. - if regex and (type(regex) == str): - regex = re.compile(regex) - - if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read): - # Handle as something to send to the remote debug monitor. - # if self.logger: - # self.logger.info("processed dict sequence to send to remote") - self.entries.append(GdbRemoteEntry(is_send_to_remote=True, regex=regex, capture=capture, expect_captures=expect_captures)) - else: - # Log line represents content to be expected from the remote debug monitor. - # if self.logger: - # self.logger.info("processed dict sequence to match receiving from remote") - self.entries.append(GdbRemoteEntry(is_send_to_remote=False, regex=regex, capture=capture, expect_captures=expect_captures)) - elif entry_type == "multi_response": - self.entries.append(MultiResponseGdbRemoteEntry(line)) - elif entry_type == "output_match": - - regex = line.get("regex", None) - # Compile the regex. 
- if regex and (type(regex) == str): - regex = re.compile(regex) - - regex_mode = line.get("regex_mode", "match") - capture = line.get("capture", None) - self.entries.append(MatchRemoteOutputEntry(regex=regex, regex_mode=regex_mode, capture=capture)) - else: - raise Exception("unknown entry type \"%s\"" % entry_type) - -def process_is_running(pid, unknown_value=True): - """If possible, validate that the given pid represents a running process on the local system. - - Args: - - pid: an OS-specific representation of a process id. Should be an integral value. - - unknown_value: value used when we cannot determine how to check running local - processes on the OS. - - Returns: - - If we can figure out how to check running process ids on the given OS: - return True if the process is running, or False otherwise. - - If we don't know how to check running process ids on the given OS: - return the value provided by the unknown_value arg. - """ - if type(pid) != int: - raise Exception("pid must be of type int") - - process_ids = [] - - if platform.system() in ['Darwin', 'Linux', 'FreeBSD', 'NetBSD']: - # Build the list of running process ids - output = subprocess.check_output("ps ax | awk '{ print $1; }'", shell=True) - text_process_ids = output.split('\n')[1:] - # Convert text pids to ints - process_ids = [int(text_pid) for text_pid in text_process_ids if text_pid != ''] - # elif {your_platform_here}: - # fill in process_ids as a list of int type process IDs running on - # the local system. - else: - # Don't know how to get list of running process IDs on this - # OS, so return the "don't know" value. 
- return unknown_value - - # Check if the pid is in the process_ids - return pid in process_ids - -if __name__ == '__main__': - EXE_PATH = get_lldb_gdbserver_exe() - if EXE_PATH: - print "lldb-gdbserver path detected: {}".format(EXE_PATH) - else: - print "lldb-gdbserver could not be found" Index: test/tools/lldb-gdbserver/main.cpp =================================================================== --- test/tools/lldb-gdbserver/main.cpp +++ /dev/null @@ -1,404 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(__APPLE__) -__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2) -int pthread_threadid_np(pthread_t,__uint64_t*); -#elif defined(__linux__) -#include -#endif - -#if defined(__linux__) -#include -#endif - -static const char *const RETVAL_PREFIX = "retval:"; -static const char *const SLEEP_PREFIX = "sleep:"; -static const char *const STDERR_PREFIX = "stderr:"; -static const char *const SET_MESSAGE_PREFIX = "set-message:"; -static const char *const PRINT_MESSAGE_COMMAND = "print-message:"; -static const char *const GET_DATA_ADDRESS_PREFIX = "get-data-address-hex:"; -static const char *const GET_STACK_ADDRESS_COMMAND = "get-stack-address-hex:"; -static const char *const GET_HEAP_ADDRESS_COMMAND = "get-heap-address-hex:"; - -static const char *const GET_CODE_ADDRESS_PREFIX = "get-code-address-hex:"; -static const char *const CALL_FUNCTION_PREFIX = "call-function:"; - -static const char *const THREAD_PREFIX = "thread:"; -static const char *const THREAD_COMMAND_NEW = "new"; -static const char *const THREAD_COMMAND_PRINT_IDS = "print-ids"; -static const char *const THREAD_COMMAND_SEGFAULT = "segfault"; - -static bool g_print_thread_ids = false; -static pthread_mutex_t g_print_mutex = PTHREAD_MUTEX_INITIALIZER; -static bool g_threads_do_segfault = false; - -static pthread_mutex_t g_jump_buffer_mutex = PTHREAD_MUTEX_INITIALIZER; -static jmp_buf g_jump_buffer; 
-static bool g_is_segfaulting = false; - -static char g_message[256]; - -static volatile char g_c1 = '0'; -static volatile char g_c2 = '1'; - -static void -print_thread_id () -{ - // Put in the right magic here for your platform to spit out the thread id (tid) that debugserver/lldb-gdbserver would see as a TID. - // Otherwise, let the else clause print out the unsupported text so that the unit test knows to skip verifying thread ids. -#if defined(__APPLE__) - __uint64_t tid = 0; - pthread_threadid_np(pthread_self(), &tid); - printf ("%" PRIx64, tid); -#elif defined (__linux__) - // This is a call to gettid() via syscall. - printf ("%" PRIx64, static_cast (syscall (__NR_gettid))); -#else - printf("{no-tid-support}"); -#endif -} - -static void -signal_handler (int signo) -{ - const char *signal_name = nullptr; - switch (signo) - { - case SIGUSR1: signal_name = "SIGUSR1"; break; - case SIGSEGV: signal_name = "SIGSEGV"; break; - default: signal_name = nullptr; - } - - // Print notice that we received the signal on a given thread. - pthread_mutex_lock (&g_print_mutex); - if (signal_name) - printf ("received %s on thread id: ", signal_name); - else - printf ("received signo %d (%s) on thread id: ", signo, strsignal (signo)); - print_thread_id (); - printf ("\n"); - pthread_mutex_unlock (&g_print_mutex); - - // Reset the signal handler if we're one of the expected signal handlers. - switch (signo) - { - case SIGSEGV: - if (g_is_segfaulting) - { - // Fix up the pointer we're writing to. This needs to happen if nothing intercepts the SIGSEGV - // (i.e. if somebody runs this from the command line). - longjmp(g_jump_buffer, 1); - } - break; - case SIGUSR1: - if (g_is_segfaulting) - { - // Fix up the pointer we're writing to. This is used to test gdb remote signal delivery. - // A SIGSEGV will be raised when the thread is created, switched out for a SIGUSR1, and - // then this code still needs to fix the seg fault. - // (i.e. if somebody runs this from the command line). 
- longjmp(g_jump_buffer, 1); - } - break; - } - - // Reset the signal handler. - sig_t sig_result = signal (signo, signal_handler); - if (sig_result == SIG_ERR) - { - fprintf(stderr, "failed to set signal handler: errno=%d\n", errno); - exit (1); - } -} - -static void -swap_chars () -{ - g_c1 = '1'; - g_c2 = '0'; - - g_c1 = '0'; - g_c2 = '1'; -} - -static void -hello () -{ - pthread_mutex_lock (&g_print_mutex); - printf ("hello, world\n"); - pthread_mutex_unlock (&g_print_mutex); -} - -static void* -thread_func (void *arg) -{ - static pthread_mutex_t s_thread_index_mutex = PTHREAD_MUTEX_INITIALIZER; - static int s_thread_index = 1; - - pthread_mutex_lock (&s_thread_index_mutex); - const int this_thread_index = s_thread_index++; - pthread_mutex_unlock (&s_thread_index_mutex); - - if (g_print_thread_ids) - { - pthread_mutex_lock (&g_print_mutex); - printf ("thread %d id: ", this_thread_index); - print_thread_id (); - printf ("\n"); - pthread_mutex_unlock (&g_print_mutex); - } - - if (g_threads_do_segfault) - { - // Sleep for a number of seconds based on the thread index. - // TODO add ability to send commands to test exe so we can - // handle timing more precisely. This is clunky. All we're - // trying to do is add predictability as to the timing of - // signal generation by created threads. - int sleep_seconds = 2 * (this_thread_index - 1); - while (sleep_seconds > 0) - sleep_seconds = sleep(sleep_seconds); - - // Test creating a SEGV. - pthread_mutex_lock (&g_jump_buffer_mutex); - g_is_segfaulting = true; - int *bad_p = nullptr; - if (setjmp(g_jump_buffer) == 0) - { - // Force a seg fault signal on this thread. - *bad_p = 0; - } - else - { - // Tell the system we're no longer seg faulting. - // Used by the SIGUSR1 signal handler that we inject - // in place of the SIGSEGV so it only tries to - // recover from the SIGSEGV if this seg fault code - // was in play. 
- g_is_segfaulting = false; - } - pthread_mutex_unlock (&g_jump_buffer_mutex); - - pthread_mutex_lock (&g_print_mutex); - printf ("thread "); - print_thread_id (); - printf (": past SIGSEGV\n"); - pthread_mutex_unlock (&g_print_mutex); - } - - int sleep_seconds_remaining = 5; - while (sleep_seconds_remaining > 0) - { - sleep_seconds_remaining = sleep (sleep_seconds_remaining); - } - - return nullptr; -} - -int main (int argc, char **argv) -{ -#if defined(__linux__) - // Immediately enable any ptracer so that we can allow the stub attach - // operation to succeed. Some Linux kernels are locked down so that - // only an ancestor can be a ptracer of a process. This disables that - // restriction. Without it, attach-related stub tests will fail. -#if defined(PR_SET_PTRACER) && defined(PR_SET_PTRACER_ANY) - const int prctl_result = prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0); - static_cast (prctl_result); -#endif -#endif - - std::vector threads; - std::unique_ptr heap_array_up; - int return_value = 0; - - // Set the signal handler. - sig_t sig_result = signal (SIGALRM, signal_handler); - if (sig_result == SIG_ERR) - { - fprintf(stderr, "failed to set SIGALRM signal handler: errno=%d\n", errno); - exit (1); - } - - sig_result = signal (SIGUSR1, signal_handler); - if (sig_result == SIG_ERR) - { - fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno); - exit (1); - } - - sig_result = signal (SIGSEGV, signal_handler); - if (sig_result == SIG_ERR) - { - fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno); - exit (1); - } - - // Process command line args. - for (int i = 1; i < argc; ++i) - { - if (std::strstr (argv[i], STDERR_PREFIX)) - { - // Treat remainder as text to go to stderr. - fprintf (stderr, "%s\n", (argv[i] + strlen (STDERR_PREFIX))); - } - else if (std::strstr (argv[i], RETVAL_PREFIX)) - { - // Treat as the return value for the program. 
- return_value = std::atoi (argv[i] + strlen (RETVAL_PREFIX)); - } - else if (std::strstr (argv[i], SLEEP_PREFIX)) - { - // Treat as the amount of time to have this process sleep (in seconds). - int sleep_seconds_remaining = std::atoi (argv[i] + strlen (SLEEP_PREFIX)); - - // Loop around, sleeping until all sleep time is used up. Note that - // signals will cause sleep to end early with the number of seconds remaining. - for (int i = 0; sleep_seconds_remaining > 0; ++i) - { - sleep_seconds_remaining = sleep (sleep_seconds_remaining); - // std::cout << "sleep result (call " << i << "): " << sleep_seconds_remaining << std::endl; - } - } - else if (std::strstr (argv[i], SET_MESSAGE_PREFIX)) - { - // Copy the contents after "set-message:" to the g_message buffer. - // Used for reading inferior memory and verifying contents match expectations. - strncpy (g_message, argv[i] + strlen (SET_MESSAGE_PREFIX), sizeof (g_message)); - - // Ensure we're null terminated. - g_message[sizeof (g_message) - 1] = '\0'; - - } - else if (std::strstr (argv[i], PRINT_MESSAGE_COMMAND)) - { - pthread_mutex_lock (&g_print_mutex); - printf ("message: %s\n", g_message); - pthread_mutex_unlock (&g_print_mutex); - } - else if (std::strstr (argv[i], GET_DATA_ADDRESS_PREFIX)) - { - volatile void *data_p = nullptr; - - if (std::strstr (argv[i] + strlen (GET_DATA_ADDRESS_PREFIX), "g_message")) - data_p = &g_message[0]; - else if (std::strstr (argv[i] + strlen (GET_DATA_ADDRESS_PREFIX), "g_c1")) - data_p = &g_c1; - else if (std::strstr (argv[i] + strlen (GET_DATA_ADDRESS_PREFIX), "g_c2")) - data_p = &g_c2; - - pthread_mutex_lock (&g_print_mutex); - printf ("data address: %p\n", data_p); - pthread_mutex_unlock (&g_print_mutex); - } - else if (std::strstr (argv[i], GET_HEAP_ADDRESS_COMMAND)) - { - // Create a byte array if not already present. 
- if (!heap_array_up) - heap_array_up.reset (new uint8_t[32]); - - pthread_mutex_lock (&g_print_mutex); - printf ("heap address: %p\n", heap_array_up.get ()); - pthread_mutex_unlock (&g_print_mutex); - } - else if (std::strstr (argv[i], GET_STACK_ADDRESS_COMMAND)) - { - pthread_mutex_lock (&g_print_mutex); - printf ("stack address: %p\n", &return_value); - pthread_mutex_unlock (&g_print_mutex); - } - else if (std::strstr (argv[i], GET_CODE_ADDRESS_PREFIX)) - { - void (*func_p)() = nullptr; - - if (std::strstr (argv[i] + strlen (GET_CODE_ADDRESS_PREFIX), "hello")) - func_p = hello; - else if (std::strstr (argv[i] + strlen (GET_CODE_ADDRESS_PREFIX), "swap_chars")) - func_p = swap_chars; - - pthread_mutex_lock (&g_print_mutex); - printf ("code address: %p\n", func_p); - pthread_mutex_unlock (&g_print_mutex); - } - else if (std::strstr (argv[i], CALL_FUNCTION_PREFIX)) - { - // Defaut to providing the address of main. - if (std::strcmp (argv[i] + strlen (CALL_FUNCTION_PREFIX), "hello") == 0) - hello(); - else if (std::strcmp (argv[i] + strlen (CALL_FUNCTION_PREFIX), "swap_chars") == 0) - swap_chars(); - else - { - pthread_mutex_lock (&g_print_mutex); - printf ("unknown function: %s\n", argv[i] + strlen (CALL_FUNCTION_PREFIX)); - pthread_mutex_unlock (&g_print_mutex); - } - } - else if (std::strstr (argv[i], THREAD_PREFIX)) - { - // Check if we're creating a new thread. - if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_NEW)) - { - // Create a new thread. - pthread_t new_thread; - const int err = ::pthread_create (&new_thread, nullptr, thread_func, nullptr); - if (err) - { - fprintf (stderr, "pthread_create() failed with error code %d\n", err); - exit (err); - } - threads.push_back (new_thread); - } - else if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_PRINT_IDS)) - { - // Turn on thread id announcing. - g_print_thread_ids = true; - - // And announce us. 
- pthread_mutex_lock (&g_print_mutex); - printf ("thread 0 id: "); - print_thread_id (); - printf ("\n"); - pthread_mutex_unlock (&g_print_mutex); - } - else if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_SEGFAULT)) - { - g_threads_do_segfault = true; - } - else - { - // At this point we don't do anything else with threads. - // Later use thread index and send command to thread. - } - } - else - { - // Treat the argument as text for stdout. - printf("%s\n", argv[i]); - } - } - - // If we launched any threads, join them - for (std::vector::iterator it = threads.begin (); it != threads.end (); ++it) - { - void *thread_retval = nullptr; - const int err = ::pthread_join (*it, &thread_retval); - if (err != 0) - fprintf (stderr, "pthread_join() failed with error code %d\n", err); - } - - return return_value; -} Index: test/tools/lldb-gdbserver/socket_packet_pump.py =================================================================== --- test/tools/lldb-gdbserver/socket_packet_pump.py +++ /dev/null @@ -1,180 +0,0 @@ -import Queue -import re -import select -import threading -import traceback - -def _handle_output_packet_string(packet_contents): - if (not packet_contents) or (len(packet_contents) < 1): - return None - elif packet_contents[0] != "O": - return None - elif packet_contents == "OK": - return None - else: - return packet_contents[1:].decode("hex") - -def _dump_queue(the_queue): - while not the_queue.empty(): - print the_queue.get(True) - print "\n" - -class SocketPacketPump(object): - """A threaded packet reader that partitions packets into two streams. - - All incoming $O packet content is accumulated with the current accumulation - state put into the OutputQueue. - - All other incoming packets are placed in the packet queue. - - A select thread can be started and stopped, and runs to place packet - content into the two queues. 
- """ - - _GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}') - - def __init__(self, pump_socket, logger=None): - if not pump_socket: - raise Exception("pump_socket cannot be None") - - self._output_queue = Queue.Queue() - self._packet_queue = Queue.Queue() - self._thread = None - self._stop_thread = False - self._socket = pump_socket - self._logger = logger - self._receive_buffer = "" - self._accumulated_output = "" - - def __enter__(self): - """Support the python 'with' statement. - - Start the pump thread.""" - self.start_pump_thread() - return self - - def __exit__(self, exit_type, value, the_traceback): - """Support the python 'with' statement. - - Shut down the pump thread.""" - self.stop_pump_thread() - - # Warn if there is any content left in any of the queues. - # That would represent unmatched packets. - if not self.output_queue().empty(): - print "warning: output queue entries still exist:" - _dump_queue(self.output_queue()) - print "from here:" - traceback.print_stack() - - if not self.packet_queue().empty(): - print "warning: packet queue entries still exist:" - _dump_queue(self.packet_queue()) - print "from here:" - traceback.print_stack() - - def start_pump_thread(self): - if self._thread: - raise Exception("pump thread is already running") - self._stop_thread = False - self._thread = threading.Thread(target=self._run_method) - self._thread.start() - - def stop_pump_thread(self): - self._stop_thread = True - if self._thread: - self._thread.join() - - def output_queue(self): - return self._output_queue - - def packet_queue(self): - return self._packet_queue - - def _process_new_bytes(self, new_bytes): - if not new_bytes: - return - if len(new_bytes) < 1: - return - - # Add new bytes to our accumulated unprocessed packet bytes. - self._receive_buffer += new_bytes - - # Parse fully-formed packets into individual packets. 
- has_more = len(self._receive_buffer) > 0 - while has_more: - if len(self._receive_buffer) <= 0: - has_more = False - # handle '+' ack - elif self._receive_buffer[0] == "+": - self._packet_queue.put("+") - self._receive_buffer = self._receive_buffer[1:] - if self._logger: - self._logger.debug( - "parsed packet from stub: +\n" + - "new receive_buffer: {}".format( - self._receive_buffer)) - else: - packet_match = self._GDB_REMOTE_PACKET_REGEX.match( - self._receive_buffer) - if packet_match: - # Our receive buffer matches a packet at the - # start of the receive buffer. - new_output_content = _handle_output_packet_string( - packet_match.group(1)) - if new_output_content: - # This was an $O packet with new content. - self._accumulated_output += new_output_content - self._output_queue.put(self._accumulated_output) - else: - # Any packet other than $O. - self._packet_queue.put(packet_match.group(0)) - - # Remove the parsed packet from the receive - # buffer. - self._receive_buffer = self._receive_buffer[ - len(packet_match.group(0)):] - if self._logger: - self._logger.debug( - "parsed packet from stub: " + - packet_match.group(0)) - self._logger.debug( - "new receive_buffer: " + - self._receive_buffer) - else: - # We don't have enough in the receive bufferto make a full - # packet. Stop trying until we read more. - has_more = False - - def _run_method(self): - self._receive_buffer = "" - self._accumulated_output = "" - - if self._logger: - self._logger.info("socket pump starting") - - # Keep looping around until we're asked to stop the thread. - while not self._stop_thread: - can_read, _, _ = select.select([self._socket], [], [], 0) - if can_read and self._socket in can_read: - try: - new_bytes = self._socket.recv(4096) - if self._logger and new_bytes and len(new_bytes) > 0: - self._logger.debug( - "pump received bytes: {}".format(new_bytes)) - except: - # Likely a closed socket. Done with the pump thread. 
- if self._logger: - self._logger.debug( - "socket read failed, stopping pump read thread") - break - self._process_new_bytes(new_bytes) - - if self._logger: - self._logger.info("socket pump exiting") - - def get_accumulated_output(self): - return self._accumulated_output - - def get_receive_buffer(self): - return self._receive_buffer Index: test/tools/lldb-gdbserver/test/test_lldbgdbserverutils.py =================================================================== --- test/tools/lldb-gdbserver/test/test_lldbgdbserverutils.py +++ /dev/null @@ -1,53 +0,0 @@ -import os.path -import re -import sys - -# adjust path for embedded unittest2 -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..')) -import unittest2 - -# adjust path for lldbgdbserverutils.py -sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) -from lldbgdbserverutils import * - - -class TestLldbGdbServerUtils(unittest2.TestCase): - def test_entry_exact_payload_match(self): - entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") - entry.assert_match(self, "$OK#9a") - - def test_entry_exact_payload_match_ignores_checksum(self): - entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") - entry.assert_match(self, "$OK#00") - - def test_entry_creates_context(self): - entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") - context = entry.assert_match(self, "$OK#9a") - self.assertIsNotNone(context) - - def test_entry_regex_matches(self): - entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), capture={ 1:"thread_id" }) - context = entry.assert_match(self, "$QC980#00") - - def test_entry_regex_saves_match(self): - entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), capture={ 1:"thread_id" }) - context = entry.assert_match(self, "$QC980#00") - self.assertEquals(context["thread_id"], "980") - - def 
test_entry_regex_expect_captures_success(self): - context = { "thread_id":"980" } - entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), expect_captures={ 2:"thread_id" }) - entry.assert_match(self, "$T11thread:980;", context=context) - - def test_entry_regex_expect_captures_raises_on_fail(self): - context = { "thread_id":"980" } - entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), expect_captures={ 2:"thread_id" }) - try: - entry.assert_match(self, "$T11thread:970;", context=context) - self.fail() - except AssertionError: - # okay - return None - -if __name__ == '__main__': - unittest2.main() Index: test/tools/lldb-mi/signal/TestMiSignal.py =================================================================== --- test/tools/lldb-mi/signal/TestMiSignal.py +++ test/tools/lldb-mi/signal/TestMiSignal.py @@ -88,7 +88,7 @@ """Test that 'lldb-mi --interpreter' notifies after it was stopped on entry (remote).""" # Prepare debugserver - sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lldb-gdbserver"))) + sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lldb-server"))) import lldbgdbserverutils debugserver_exe = lldbgdbserverutils.get_debugserver_exe() if not debugserver_exe: @@ -164,7 +164,7 @@ """Test that 'lldb-mi --interpreter' notifies after it was stopped when segfault occurred (remote).""" # Prepare debugserver - sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lldb-gdbserver"))) + sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "lldb-server"))) import lldbgdbserverutils debugserver_exe = lldbgdbserverutils.get_debugserver_exe() if not debugserver_exe: Index: test/tools/lldb-server/Makefile =================================================================== --- /dev/null +++ test/tools/lldb-server/Makefile @@ 
-0,0 +1,8 @@ +LEVEL = ../../make + +CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS +ENABLE_THREADS := YES +CXX_SOURCES := main.cpp +MAKE_DSYM :=NO + +include $(LEVEL)/Makefile.rules Index: test/tools/lldb-server/TestGdbRemoteAttach.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteAttach.py @@ -0,0 +1,122 @@ +import gdbremote_testcase +import lldbgdbserverutils +import unittest2 + +from lldbtest import * + +class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase): + + def attach_with_vAttach(self): + # Start the inferior, start the debug monitor, nothing is attached yet. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:60"]) + self.assertIsNotNone(procs) + + # Make sure the target process has been launched. + inferior = procs.get("inferior") + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + self.assertTrue(lldbgdbserverutils.process_is_running(inferior.pid, True)) + + # Add attach packets. + self.test_sequence.add_log_lines([ + # Do the attach. + "read packet: $vAttach;{:x}#00".format(inferior.pid), + # Expect a stop notification from the attach. + { "direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$", "capture":{1:"stop_signal_hex"} }, + ], True) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. 
+ pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, inferior.pid) + + @debugserver_test + @dsym_test + def test_attach_with_vAttach_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() + + @llgs_test + @dwarf_test + def test_attach_with_vAttach_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() + + +if __name__ == '__main__': + unittest2.main() +import gdbremote_testcase +import lldbgdbserverutils +import unittest2 + +from lldbtest import * + +class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase): + + def attach_with_vAttach(self): + # Start the inferior, start the debug monitor, nothing is attached yet. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:60"]) + self.assertIsNotNone(procs) + + # Make sure the target process has been launched. + inferior = procs.get("inferior") + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + self.assertTrue(lldbgdbserverutils.process_is_running(inferior.pid, True)) + + # Add attach packets. + self.test_sequence.add_log_lines([ + # Do the attach. + "read packet: $vAttach;{:x}#00".format(inferior.pid), + # Expect a stop notification from the attach. + { "direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$", "capture":{1:"stop_signal_hex"} }, + ], True) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. 
+ pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, inferior.pid) + + @debugserver_test + @dsym_test + def test_attach_with_vAttach_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() + + @llgs_test + @dwarf_test + def test_attach_with_vAttach_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemoteAuxvSupport.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteAuxvSupport.py @@ -0,0 +1,208 @@ +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase): + + AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read" + + def has_auxv_support(self): + inferior_args = ["message:main entered", "sleep:5"] + procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) + + # Don't do anything until we match the launched inferior main entry output. + # Then immediately interrupt the process. + # This prevents auxv data being asked for before it's ready and leaves + # us in a stopped state. + self.test_sequence.add_log_lines([ + # Start the inferior... + "read packet: $c#63", + # ... match output.... + { "type":"output_match", "regex":r"^message:main entered\r\n$" }, + ], True) + # ... then interrupt. 
+ self.add_interrupt_packets() + self.add_qSupported_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + features = self.parse_qSupported_response(context) + return self.AUXV_SUPPORT_FEATURE_NAME in features and features[self.AUXV_SUPPORT_FEATURE_NAME] == "+" + + def get_raw_auxv_data(self): + # Start up llgs and inferior, and check for auxv support. + if not self.has_auxv_support(): + self.skipTest("auxv data not supported") + + # Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target. + # Auxv is specified in terms of pairs of unsigned longs. + self.reset_test_sequence() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + proc_info = self.parse_process_info_response(context) + self.assertIsNotNone(proc_info) + self.assertTrue("ptrsize" in proc_info) + word_size = int(proc_info["ptrsize"]) + + OFFSET = 0 + LENGTH = 0x400 + + # Grab the auxv data. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + "read packet: $qXfer:auxv:read::{:x},{:x}:#00".format(OFFSET, LENGTH), + {"direction":"send", "regex":re.compile(r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE|re.DOTALL), "capture":{1:"response_type", 2:"content_raw"} } + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure we end up with all auxv data in one packet. + # FIXME don't assume it all comes back in one packet. + self.assertEquals(context.get("response_type"), "l") + + # Decode binary data. + content_raw = context.get("content_raw") + self.assertIsNotNone(content_raw) + return (word_size, self.decode_gdbremote_binary(content_raw)) + + def supports_auxv(self): + # When non-auxv platforms support llgs, skip the test on platforms + # that don't support auxv. + self.assertTrue(self.has_auxv_support()) + + # + # We skip the "supports_auxv" test on debugserver. 
The rest of the tests + # appropriately skip the auxv tests if the support flag is not present + # in the qSupported response, so the debugserver test bits are still there + # in case debugserver code one day does have auxv support and thus those + # tests don't get skipped. + # + + @llgs_test + @dwarf_test + def test_supports_auxv_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.supports_auxv() + + def auxv_data_is_correct_size(self): + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Ensure auxv data is a multiple of 2*word_size (there should be two unsigned long fields per auxv entry). + self.assertEquals(len(auxv_data) % (2*word_size), 0) + # print "auxv contains {} entries".format(len(auxv_data) / (2*word_size)) + + @debugserver_test + @dsym_test + def test_auxv_data_is_correct_size_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.auxv_data_is_correct_size() + + @llgs_test + @dwarf_test + def test_auxv_data_is_correct_size_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.auxv_data_is_correct_size() + + def auxv_keys_look_valid(self): + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Grab endian. + self.reset_test_sequence() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) + self.assertIsNotNone(auxv_dict) + + # Verify keys look reasonable. 
+ for auxv_key in auxv_dict: + self.assertTrue(auxv_key >= 1) + self.assertTrue(auxv_key <= 1000) + # print "auxv dict: {}".format(auxv_dict) + + @debugserver_test + @dsym_test + def test_auxv_keys_look_valid_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.auxv_keys_look_valid() + + @llgs_test + @dwarf_test + def test_auxv_keys_look_valid_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.auxv_keys_look_valid() + + def auxv_chunked_reads_work(self): + # Verify that multiple smaller offset,length reads of auxv data + # return the same data as a single larger read. + + # Grab the auxv data with a single large read here. + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Grab endian. + self.reset_test_sequence() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) + self.assertIsNotNone(auxv_dict) + + iterated_auxv_data = self.read_binary_data_in_chunks("qXfer:auxv:read::", 2*word_size) + self.assertIsNotNone(iterated_auxv_data) + + auxv_dict_iterated = self.build_auxv_dict(endian, word_size, iterated_auxv_data) + self.assertIsNotNone(auxv_dict_iterated) + + # Verify both types of data collection returned same content. 
+ self.assertEquals(auxv_dict_iterated, auxv_dict) + + @debugserver_test + @dsym_test + def test_auxv_chunked_reads_work_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.auxv_chunked_reads_work() + + @llgs_test + @dwarf_test + def test_auxv_chunked_reads_work_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.auxv_chunked_reads_work() + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py @@ -0,0 +1,154 @@ +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemoteExpeditedRegisters(gdbremote_testcase.GdbRemoteTestCaseBase): + + def gather_expedited_registers(self): + # Setup the stub and set the gdb remote command stream. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) + self.test_sequence.add_log_lines([ + # Start up the inferior. + "read packet: $c#63", + # Immediately tell it to stop. We want to see what it reports. + "read packet: {}".format(chr(03)), + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, + ], True) + + # Run the gdb remote command stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Pull out expedited registers. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + + expedited_registers = self.extract_registers_from_stop_notification(key_vals_text) + self.assertIsNotNone(expedited_registers) + + return expedited_registers + + def stop_notification_contains_generic_register(self, generic_register_name): + # Generate a stop reply, parse out expedited registers from stop notification. 
+ expedited_registers = self.gather_expedited_registers() + self.assertIsNotNone(expedited_registers) + self.assertTrue(len(expedited_registers) > 0) + + # Gather target register infos. + reg_infos = self.gather_register_infos() + + # Find the generic register. + reg_info = self.find_generic_register_with_name(reg_infos, generic_register_name) + self.assertIsNotNone(reg_info) + + # Ensure the expedited registers contained it. + self.assertTrue(reg_info["lldb_register_index"] in expedited_registers) + # print "{} reg_info:{}".format(generic_register_name, reg_info) + + def stop_notification_contains_any_registers(self): + # Generate a stop reply, parse out expedited registers from stop notification. + expedited_registers = self.gather_expedited_registers() + # Verify we have at least one expedited register. + self.assertTrue(len(expedited_registers) > 0) + + @debugserver_test + @dsym_test + def test_stop_notification_contains_any_registers_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_notification_contains_any_registers() + + @llgs_test + @dwarf_test + def test_stop_notification_contains_any_registers_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_notification_contains_any_registers() + + def stop_notification_contains_no_duplicate_registers(self): + # Generate a stop reply, parse out expedited registers from stop notification. + expedited_registers = self.gather_expedited_registers() + # Verify no expedited register was specified multiple times. 
+ for (reg_num, value) in expedited_registers.items(): + if (type(value) == list) and (len(value) > 0): + self.fail("expedited register number {} specified more than once ({} times)".format(reg_num, len(value))) + + @debugserver_test + @dsym_test + def test_stop_notification_contains_no_duplicate_registers_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_notification_contains_no_duplicate_registers() + + @llgs_test + @dwarf_test + def test_stop_notification_contains_no_duplicate_registers_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_notification_contains_no_duplicate_registers() + + def stop_notification_contains_pc_register(self): + self.stop_notification_contains_generic_register("pc") + + @debugserver_test + @dsym_test + def test_stop_notification_contains_pc_register_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_notification_contains_pc_register() + + @llgs_test + @dwarf_test + def test_stop_notification_contains_pc_register_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_notification_contains_pc_register() + + def stop_notification_contains_fp_register(self): + self.stop_notification_contains_generic_register("fp") + + @debugserver_test + @dsym_test + def test_stop_notification_contains_fp_register_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_notification_contains_fp_register() + + @llgs_test + @dwarf_test + def test_stop_notification_contains_fp_register_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_notification_contains_fp_register() + + def stop_notification_contains_sp_register(self): + self.stop_notification_contains_generic_register("sp") 
+ + @debugserver_test + @dsym_test + def test_stop_notification_contains_sp_register_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_notification_contains_sp_register() + + @llgs_test + @dwarf_test + def test_stop_notification_contains_sp_register_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_notification_contains_sp_register() + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemoteKill.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteKill.py @@ -0,0 +1,49 @@ +import unittest2 + +import gdbremote_testcase +import lldbgdbserverutils + +from lldbtest import * + +class TestGdbRemoteKill(gdbremote_testcase.GdbRemoteTestCaseBase): + def attach_commandline_kill_after_initial_stop(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines([ + "read packet: $k#6b", + {"direction":"send", "regex":r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}" }, + ], True) + + if self.stub_sends_two_stop_notifications_on_kill: + # Add an expectation for a second X result for stubs that send two of these. + self.test_sequence.add_log_lines([ + {"direction":"send", "regex":r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}" }, + ], True) + + self.expect_gdbremote_sequence() + + # Wait a moment for completed and now-detached inferior process to clear. + time.sleep(1) + + # Process should be dead now. Reap results. + poll_result = procs["inferior"].poll() + self.assertIsNotNone(poll_result) + + # Where possible, verify at the system level that the process is not running. 
+ self.assertFalse(lldbgdbserverutils.process_is_running(procs["inferior"].pid, False)) + + @debugserver_test + @dsym_test + def test_attach_commandline_kill_after_initial_stop_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.attach_commandline_kill_after_initial_stop() + + @llgs_test + @dwarf_test + def test_attach_commandline_kill_after_initial_stop_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.attach_commandline_kill_after_initial_stop() + Index: test/tools/lldb-server/TestGdbRemoteProcessInfo.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteProcessInfo.py @@ -0,0 +1,188 @@ +import gdbremote_testcase +import lldbgdbserverutils +import sys +import unittest2 + +from lldbtest import * + +class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + def qProcessInfo_returns_running_process(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id looks reasonable. + pid_text = process_info.get("pid") + self.assertIsNotNone(pid_text) + pid = int(pid_text, base=16) + self.assertNotEqual(0, pid) + + # If possible, verify that the process is running. 
+ self.assertTrue(lldbgdbserverutils.process_is_running(pid, True)) + + @debugserver_test + @dsym_test + def test_qProcessInfo_returns_running_process_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.qProcessInfo_returns_running_process() + + @llgs_test + @dwarf_test + def test_qProcessInfo_returns_running_process_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.qProcessInfo_returns_running_process() + + def attach_commandline_qProcessInfo_reports_correct_pid(self): + procs = self.prep_debug_monitor_and_inferior() + self.assertIsNotNone(procs) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. + pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, procs["inferior"].pid) + + @debugserver_test + @dsym_test + def test_attach_commandline_qProcessInfo_reports_correct_pid_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.attach_commandline_qProcessInfo_reports_correct_pid() + + @llgs_test + @dwarf_test + def test_attach_commandline_qProcessInfo_reports_correct_pid_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.attach_commandline_qProcessInfo_reports_correct_pid() + + def qProcessInfo_reports_valid_endian(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + 
# Ensure the process id looks reasonable. + endian = process_info.get("endian") + self.assertIsNotNone(endian) + self.assertTrue(endian in ["little", "big", "pdp"]) + + @debugserver_test + @dsym_test + def test_qProcessInfo_reports_valid_endian_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.qProcessInfo_reports_valid_endian() + + @llgs_test + @dwarf_test + def test_qProcessInfo_reports_valid_endian_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.qProcessInfo_reports_valid_endian() + + def qProcessInfo_contains_keys(self, expected_key_set): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the expected keys are present and non-None within the process info. 
+ missing_key_set = set() + for expected_key in expected_key_set: + if expected_key not in process_info: + missing_key_set.add(expected_key) + + self.assertEquals(missing_key_set, set(), "the listed keys are missing in the qProcessInfo result") + + def qProcessInfo_does_not_contain_keys(self, absent_key_set): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the unexpected keys are not present + unexpected_key_set = set() + for unexpected_key in absent_key_set: + if unexpected_key in process_info: + unexpected_key_set.add(unexpected_key) + + self.assertEquals(unexpected_key_set, set(), "the listed keys were present but unexpected in qProcessInfo result") + + @unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin") + @debugserver_test + @dsym_test + def test_qProcessInfo_contains_cputype_cpusubtype_debugserver_darwin(self): + self.init_debugserver_test() + self.buildDsym() + self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype'])) + + @unittest2.skipUnless(sys.platform.startswith("linux"), "requires Linux") + @llgs_test + @dwarf_test + def test_qProcessInfo_contains_triple_llgs_linux(self): + self.init_llgs_test() + self.buildDwarf() + self.qProcessInfo_contains_keys(set(['triple'])) + + @unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin") + @debugserver_test + @dsym_test + def test_qProcessInfo_does_not_contain_triple_debugserver_darwin(self): + self.init_debugserver_test() + self.buildDsym() + # We don't expect to see triple on darwin. If we do, we'll prefer triple + # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup + # for the remote Host and Process. 
+ self.qProcessInfo_does_not_contain_keys(set(['triple'])) + + @unittest2.skipUnless(sys.platform.startswith("linux"), "requires Linux") + @llgs_test + @dwarf_test + def test_qProcessInfo_does_not_contain_cputype_cpusubtype_llgs_linux(self): + self.init_llgs_test() + self.buildDwarf() + self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype'])) + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemoteRegisterState.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteRegisterState.py @@ -0,0 +1,130 @@ +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemoteRegisterState(gdbremote_testcase.GdbRemoteTestCaseBase): + """Test QSaveRegisterState/QRestoreRegisterState support.""" + + def grp_register_save_restore_works(self, with_suffix): + # Start up the process, use thread suffix, grab main thread id. + inferior_args = ["message:main entered", "sleep:5"] + procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) + + self.add_process_info_collection_packets() + self.add_register_info_collection_packets() + if with_suffix: + self.add_thread_suffix_request_packets() + self.add_threadinfo_collection_packets() + self.test_sequence.add_log_lines([ + # Start the inferior... + "read packet: $c#63", + # ... match output.... + { "type":"output_match", "regex":r"^message:main entered\r\n$" }, + ], True) + # ... then interrupt. + self.add_interrupt_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info. + process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Gather register info. 
+ reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + # Pull out the register infos that we think we can bit flip successfully. + gpr_reg_infos = [reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] + self.assertTrue(len(gpr_reg_infos) > 0) + + # Gather thread info. + if with_suffix: + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + thread_id = threads[0] + self.assertIsNotNone(thread_id) + # print "Running on thread: 0x{:x}".format(thread_id) + else: + thread_id = None + + # Save register state. + self.reset_test_sequence() + self.add_QSaveRegisterState_packets(thread_id) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + (success, state_id) = self.parse_QSaveRegisterState_response(context) + self.assertTrue(success) + self.assertIsNotNone(state_id) + # print "saved register state id: {}".format(state_id) + + # Remember initial register values. + initial_reg_values = self.read_register_values(gpr_reg_infos, endian, thread_id=thread_id) + # print "initial_reg_values: {}".format(initial_reg_values) + + # Flip gpr register values. + (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(gpr_reg_infos, endian, thread_id=thread_id) + # print "successful writes: {}, failed writes: {}".format(successful_writes, failed_writes) + self.assertTrue(successful_writes > 0) + + flipped_reg_values = self.read_register_values(gpr_reg_infos, endian, thread_id=thread_id) + # print "flipped_reg_values: {}".format(flipped_reg_values) + + # Restore register values. + self.reset_test_sequence() + self.add_QRestoreRegisterState_packets(state_id, thread_id) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify registers match initial register values. 
+ final_reg_values = self.read_register_values(gpr_reg_infos, endian, thread_id=thread_id) + # print "final_reg_values: {}".format(final_reg_values) + self.assertIsNotNone(final_reg_values) + self.assertEquals(final_reg_values, initial_reg_values) + + @debugserver_test + @dsym_test + def test_grp_register_save_restore_works_with_suffix_debugserver_dsym(self): + USE_THREAD_SUFFIX = True + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @llgs_test + @dwarf_test + def test_grp_register_save_restore_works_with_suffix_llgs_dwarf(self): + USE_THREAD_SUFFIX = True + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @debugserver_test + @dsym_test + def test_grp_register_save_restore_works_no_suffix_debugserver_dsym(self): + USE_THREAD_SUFFIX = False + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @llgs_test + @dwarf_test + def test_grp_register_save_restore_works_no_suffix_llgs_dwarf(self): + USE_THREAD_SUFFIX = False + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemoteSingleStep.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteSingleStep.py @@ -0,0 +1,25 @@ +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase): + + @debugserver_test + @dsym_test + def test_single_step_only_steps_one_instruction_with_s_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + 
self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="s") + + @llgs_test + @dwarf_test + def test_single_step_only_steps_one_instruction_with_s_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="s") + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py @@ -0,0 +1,172 @@ +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemoteThreadsInStopReply(gdbremote_testcase.GdbRemoteTestCaseBase): + + ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [ + "read packet: $QListThreadsInStopReply#21", + "send packet: $OK#00", + ] + + def gather_stop_reply_threads(self, post_startup_log_lines, thread_count): + # Set up the inferior args. + inferior_args=[] + for i in range(thread_count - 1): + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) + + # Assumes test_sequence has anything added needed to setup the initial state. + # (Like optionally enabling QThreadsInStopReply.) + if post_startup_log_lines: + self.test_sequence.add_log_lines(post_startup_log_lines, True) + self.test_sequence.add_log_lines([ + "read packet: $c#63" + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Give threads time to start up, then break. 
+ time.sleep(1) + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + "read packet: {}".format(chr(03)), + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Wait until all threads have started. + threads = self.wait_for_thread_count(thread_count, timeout_seconds=3) + self.assertIsNotNone(threads) + self.assertEquals(len(threads), thread_count) + + # Run, then stop the process, grab the stop reply content. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + "read packet: $c#63", + "read packet: {}".format(chr(03)), + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse the stop reply contents. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + kv_dict = self.parse_key_val_dict(key_vals_text) + self.assertIsNotNone(kv_dict) + + # Pull out threads from stop response. 
+ stop_reply_threads_text = kv_dict.get("threads") + if stop_reply_threads_text: + return [int(thread_id, 16) for thread_id in stop_reply_threads_text.split(",")] + else: + return [] + + def QListThreadsInStopReply_supported(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines(self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @dsym_test + def test_QListThreadsInStopReply_supported_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.QListThreadsInStopReply_supported() + + @llgs_test + @dwarf_test + def test_QListThreadsInStopReply_supported_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.QListThreadsInStopReply_supported() + + def stop_reply_reports_multiple_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is enabled. + stop_reply_threads = self.gather_stop_reply_threads(self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + self.assertEquals(len(stop_reply_threads), thread_count) + + @debugserver_test + @dsym_test + def test_stop_reply_reports_multiple_threads_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_reply_reports_multiple_threads(5) + + @llgs_test + @dwarf_test + def test_stop_reply_reports_multiple_threads_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_reply_reports_multiple_threads(5) + + def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is not enabled. 
+ stop_reply_threads = self.gather_stop_reply_threads(None, thread_count) + self.assertEquals(len(stop_reply_threads), 0) + + @debugserver_test + @dsym_test + def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.no_QListThreadsInStopReply_supplies_no_threads(5) + + @llgs_test + @dwarf_test + def test_no_QListThreadsInStopReply_supplies_no_threads_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.no_QListThreadsInStopReply_supplies_no_threads(5) + + def stop_reply_reports_correct_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is enabled. + stop_reply_threads = self.gather_stop_reply_threads(self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + self.assertEquals(len(stop_reply_threads), thread_count) + + # Gather threads from q{f,s}ThreadInfo. + self.reset_test_sequence() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + self.assertEquals(len(threads), thread_count) + + # Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads + for tid in threads: + self.assertTrue(tid in stop_reply_threads) + + @debugserver_test + @dsym_test + def test_stop_reply_reports_correct_threads_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.stop_reply_reports_correct_threads(5) + + @llgs_test + @dwarf_test + def test_stop_reply_reports_correct_threads_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.stop_reply_reports_correct_threads(5) + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py 
=================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py @@ -0,0 +1,155 @@ +import sys +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemote_qThreadStopInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + THREAD_COUNT = 5 + + def gather_stop_replies_via_qThreadStopInfo(self, thread_count): + # Set up the inferior args. + inferior_args=[] + for i in range(thread_count - 1): + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) + + # Assumes test_sequence has anything added needed to setup the initial state. + # (Like optionally enabling QThreadsInStopReply.) + self.test_sequence.add_log_lines([ + "read packet: $c#63" + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Give threads time to start up, then break. + time.sleep(1) + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + "read packet: {}".format(chr(03)), + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Wait until all threads have started. + threads = self.wait_for_thread_count(thread_count, timeout_seconds=3) + self.assertIsNotNone(threads) + self.assertEquals(len(threads), thread_count) + + # Grab stop reply for each thread via qThreadStopInfo{tid:hex}. + stop_replies = {} + thread_dicts = {} + for thread in threads: + # Run the qThreadStopInfo command. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines([ + "read packet: $qThreadStopInfo{:x}#00".format(thread), + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result", 2:"key_vals_text"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse stop reply contents. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + kv_dict = self.parse_key_val_dict(key_vals_text) + self.assertIsNotNone(kv_dict) + + # Verify there is a thread and that it matches the expected thread id. + kv_thread = kv_dict.get("thread") + self.assertIsNotNone(kv_thread) + kv_thread_id = int(kv_thread, 16) + self.assertEquals(kv_thread_id, thread) + + # Grab the stop id reported. + stop_result_text = context.get("stop_result") + self.assertIsNotNone(stop_result_text) + stop_replies[kv_thread_id] = int(stop_result_text, 16) + + # Hang on to the key-val dictionary for the thread. + thread_dicts[kv_thread_id] = kv_dict + + return (stop_replies, thread_dicts) + + def qThreadStopInfo_works_for_multiple_threads(self, thread_count): + (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertEquals(len(stop_replies), thread_count) + + @debugserver_test + @dsym_test + def test_qThreadStopInfo_works_for_multiple_threads_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) + + @llgs_test + @dwarf_test + def test_qThreadStopInfo_works_for_multiple_threads_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) + + def qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self, thread_count): + (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + 
 self.assertIsNotNone(stop_replies) + + no_stop_reason_count = sum(1 for stop_reason in stop_replies.values() if stop_reason == 0) + with_stop_reason_count = sum(1 for stop_reason in stop_replies.values() if stop_reason != 0) + + # All but one thread should report no stop reason. + self.assertEqual(no_stop_reason_count, thread_count - 1) + + # Only one thread should indicate a stop reason. + self.assertEqual(with_stop_reason_count, 1) + + @debugserver_test + @dsym_test + def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self.THREAD_COUNT) + + @llgs_test + @dwarf_test + def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(self.THREAD_COUNT) + + def qThreadStopInfo_has_valid_thread_names(self, thread_count, expected_thread_name): + (_, thread_dicts) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertIsNotNone(thread_dicts) + + for thread_dict in thread_dicts.values(): + name = thread_dict.get("name") + self.assertIsNotNone(name) + self.assertEquals(name, expected_thread_name) + + @unittest2.skip("MacOSX doesn't have a default thread name") + @debugserver_test + @dsym_test + def test_qThreadStopInfo_has_valid_thread_names_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") + + @unittest2.skipUnless(sys.platform.startswith("linux"), "test requires OS with set, equal thread names by default") + @llgs_test + @dwarf_test + def test_qThreadStopInfo_has_valid_thread_names_llgs_dwarf(self): + 
self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestGdbRemote_vCont.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestGdbRemote_vCont.py @@ -0,0 +1,125 @@ +import unittest2 + +import gdbremote_testcase +from lldbtest import * + +class TestGdbRemote_vCont(gdbremote_testcase.GdbRemoteTestCaseBase): + + def vCont_supports_mode(self, mode, inferior_args=None): + # Setup the stub and set the gdb remote command stream. + procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) + self.add_vCont_query_packets() + + # Run the gdb remote command stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Pull out supported modes. + supported_vCont_modes = self.parse_vCont_query_response(context) + self.assertIsNotNone(supported_vCont_modes) + + # Verify we support the given mode. 
+ self.assertTrue(mode in supported_vCont_modes) + + def vCont_supports_c(self): + self.vCont_supports_mode("c") + + def vCont_supports_C(self): + self.vCont_supports_mode("C") + + def vCont_supports_s(self): + self.vCont_supports_mode("s") + + def vCont_supports_S(self): + self.vCont_supports_mode("S") + + @debugserver_test + @dsym_test + def test_vCont_supports_c_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.vCont_supports_c() + + @llgs_test + def test_vCont_supports_c_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.vCont_supports_c() + + @debugserver_test + @dsym_test + def test_vCont_supports_C_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.vCont_supports_C() + + @llgs_test + @dwarf_test + def test_vCont_supports_C_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.vCont_supports_C() + + @debugserver_test + @dsym_test + def test_vCont_supports_s_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.vCont_supports_s() + + @llgs_test + @dwarf_test + def test_vCont_supports_s_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.vCont_supports_s() + + @debugserver_test + @dsym_test + def test_vCont_supports_S_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.vCont_supports_S() + + @llgs_test + @dwarf_test + def test_vCont_supports_S_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.vCont_supports_S() + + @debugserver_test + @dsym_test + def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="vCont;s") + + @llgs_test + @dwarf_test + def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + 
self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction(use_Hc_packet=True, step_instruction="vCont;s") + + @debugserver_test + @dsym_test + def test_single_step_only_steps_one_instruction_with_vCont_s_thread_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction(use_Hc_packet=False, step_instruction="vCont;s:{thread}") + + @llgs_test + @dwarf_test + def test_single_step_only_steps_one_instruction_with_vCont_s_thread_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction(use_Hc_packet=False, step_instruction="vCont;s:{thread}") + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/TestLldbGdbServer.py =================================================================== --- /dev/null +++ test/tools/lldb-server/TestLldbGdbServer.py @@ -0,0 +1,1501 @@ +""" +Test case for testing the gdbremote protocol. + +Tests run against debugserver and lldb-gdbserver (llgs). +lldb-gdbserver tests run where the lldb-gdbserver exe is +available. + +This class will be broken into smaller test case classes by +gdb remote packet functional areas. For now it contains +the initial set of tests implemented. 
+""" + +import gdbremote_testcase +import lldbgdbserverutils +import platform +import signal +import unittest2 +from lldbtest import * + +class LldbGdbServerTestCase(gdbremote_testcase.GdbRemoteTestCaseBase): + + @debugserver_test + def test_exe_starts_debugserver(self): + self.init_debugserver_test() + server = self.connect_to_debug_monitor() + + @llgs_test + def test_exe_starts_llgs(self): + self.init_llgs_test() + server = self.connect_to_debug_monitor() + + def start_no_ack_mode(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.expect_gdbremote_sequence() + + @debugserver_test + def test_start_no_ack_mode_debugserver(self): + self.init_debugserver_test() + self.start_no_ack_mode() + + @llgs_test + def test_start_no_ack_mode_llgs(self): + self.init_llgs_test() + self.start_no_ack_mode() + + def thread_suffix_supported(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["lldb-gdbserver < 26> read packet: $QThreadSuffixSupported#e4", + "lldb-gdbserver < 6> send packet: $OK#9a"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + def test_thread_suffix_supported_debugserver(self): + self.init_debugserver_test() + self.thread_suffix_supported() + + @llgs_test + def test_thread_suffix_supported_llgs(self): + self.init_llgs_test() + self.thread_suffix_supported() + + def list_threads_in_stop_reply_supported(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["lldb-gdbserver < 27> read packet: $QListThreadsInStopReply#21", + "lldb-gdbserver < 6> send packet: $OK#9a"], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + def test_list_threads_in_stop_reply_supported_debugserver(self): + self.init_debugserver_test() + self.list_threads_in_stop_reply_supported() + 
+ @llgs_test + def test_list_threads_in_stop_reply_supported_llgs(self): + self.init_llgs_test() + self.list_threads_in_stop_reply_supported() + + def start_inferior(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args = [os.path.abspath('a.out')] + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["read packet: %s" % lldbgdbserverutils.build_gdbremote_A_packet(launch_args), + "send packet: $OK#9a"], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + @dsym_test + def test_start_inferior_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.start_inferior() + + @llgs_test + @dwarf_test + def test_start_inferior_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.start_inferior() + + def inferior_exit_0(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args = [os.path.abspath('a.out')] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W00#00"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @dsym_test + def test_inferior_exit_0_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.inferior_exit_0() + + @llgs_test + @dwarf_test + def test_inferior_exit_0_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.inferior_exit_0() + + def inferior_exit_42(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + RETVAL = 42 + + # build launch args + launch_args = [os.path.abspath('a.out'), "retval:%d" % RETVAL] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W{0:02x}#00".format(RETVAL)], + True) + + self.expect_gdbremote_sequence() + + 
@debugserver_test + @dsym_test + def test_inferior_exit_42_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.inferior_exit_42() + + @llgs_test + @dwarf_test + def test_inferior_exit_42_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.inferior_exit_42() + + def c_packet_works(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args = [os.path.abspath('a.out')] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $c#63", + "send packet: $W00#00"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @dsym_test + def test_c_packet_works_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.c_packet_works() + + @llgs_test + @dwarf_test + def test_c_packet_works_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.c_packet_works() + + def inferior_print_exit(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args = [os.path.abspath('a.out'), "hello, world"] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + {"type":"output_match", "regex":r"^hello, world\r\n$" }, + "send packet: $W00#00"], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @dsym_test + def test_inferior_print_exit_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.inferior_print_exit() + + @llgs_test + @dwarf_test + def test_inferior_print_exit_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.inferior_print_exit() + + def first_launch_stop_reply_thread_matches_first_qC(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + 
launch_args = [os.path.abspath('a.out'), "hello, world"] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $qC#00", + { "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} }, + "read packet: $?#00", + { "direction":"send", "regex":r"^\$T[0-9a-fA-F]{2}thread:([0-9a-fA-F]+)", "expect_captures":{1:"thread_id"} }], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + @dsym_test + def test_first_launch_stop_reply_thread_matches_first_qC_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.first_launch_stop_reply_thread_matches_first_qC() + + @llgs_test + @dwarf_test + def test_first_launch_stop_reply_thread_matches_first_qC_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.first_launch_stop_reply_thread_matches_first_qC() + + def attach_commandline_continue_app_exits(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W00#00"], + True) + self.expect_gdbremote_sequence() + + # Wait a moment for completed and now-detached inferior process to clear. + time.sleep(1) + + # Process should be dead now. Reap results. + poll_result = procs["inferior"].poll() + self.assertIsNotNone(poll_result) + + # Where possible, verify at the system level that the process is not running. 
+ self.assertFalse(lldbgdbserverutils.process_is_running(procs["inferior"].pid, False)) + + @debugserver_test + @dsym_test + def test_attach_commandline_continue_app_exits_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.attach_commandline_continue_app_exits() + + @llgs_test + @dwarf_test + def test_attach_commandline_continue_app_exits_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.attach_commandline_continue_app_exits() + + def qRegisterInfo_returns_one_valid_result(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build launch args + launch_args = [os.path.abspath('a.out')] + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $qRegisterInfo0#00", + { "direction":"send", "regex":r"^\$(.+);#[0-9A-Fa-f]{2}", "capture":{1:"reginfo_0"} }], + True) + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + reg_info_packet = context.get("reginfo_0") + self.assertIsNotNone(reg_info_packet) + self.assert_valid_reg_info(lldbgdbserverutils.parse_reg_info_response(reg_info_packet)) + + @debugserver_test + @dsym_test + def test_qRegisterInfo_returns_one_valid_result_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.qRegisterInfo_returns_one_valid_result() + + @llgs_test + @dwarf_test + def test_qRegisterInfo_returns_one_valid_result_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.qRegisterInfo_returns_one_valid_result() + + def qRegisterInfo_returns_all_valid_results(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build launch args. + launch_args = [os.path.abspath('a.out')] + + # Build the expected protocol stream. 
+ self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Validate that each register info returned validates. + for reg_info in self.parse_register_info_packets(context): + self.assert_valid_reg_info(reg_info) + + @debugserver_test + @dsym_test + def test_qRegisterInfo_returns_all_valid_results_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.qRegisterInfo_returns_all_valid_results() + + @llgs_test + @dwarf_test + def test_qRegisterInfo_returns_all_valid_results_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.qRegisterInfo_returns_all_valid_results() + + def qRegisterInfo_contains_required_generics(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build launch args + launch_args = [os.path.abspath('a.out')] + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all generic registers found. + generic_regs = { reg_info['generic']:1 for reg_info in reg_infos if 'generic' in reg_info } + + # Ensure we have a program counter register. + self.assertTrue('pc' in generic_regs) + + # Ensure we have a frame pointer register. + self.assertTrue('fp' in generic_regs) + + # Ensure we have a stack pointer register. + self.assertTrue('sp' in generic_regs) + + # Ensure we have a flags register. 
+ self.assertTrue('flags' in generic_regs) + + @debugserver_test + @dsym_test + def test_qRegisterInfo_contains_required_generics_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.qRegisterInfo_contains_required_generics() + + @llgs_test + @dwarf_test + def test_qRegisterInfo_contains_required_generics_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.qRegisterInfo_contains_required_generics() + + def qRegisterInfo_contains_at_least_one_register_set(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build launch args + launch_args = [os.path.abspath('a.out')] + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all register sets found. 
+ register_sets = { reg_info['set']:1 for reg_info in reg_infos if 'set' in reg_info } + self.assertTrue(len(register_sets) >= 1) + + @debugserver_test + @dsym_test + def test_qRegisterInfo_contains_at_least_one_register_set_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.qRegisterInfo_contains_at_least_one_register_set() + + @llgs_test + @dwarf_test + def test_qRegisterInfo_contains_at_least_one_register_set_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.qRegisterInfo_contains_at_least_one_register_set() + + def qRegisterInfo_contains_avx_registers_on_linux_x86_64(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build launch args + launch_args = [os.path.abspath('a.out')] + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all generics found. + register_sets = { reg_info['set']:1 for reg_info in reg_infos if 'set' in reg_info } + self.assertTrue("Advanced Vector Extensions" in register_sets) + + @llgs_test + @dwarf_test + def test_qRegisterInfo_contains_avx_registers_on_linux_x86_64_llgs_dwarf(self): + # Skip this test if not Linux x86_64. + if platform.system() != "Linux" or platform.processor() != "x86_64": + self.skipTest("linux x86_64 test") + + self.init_llgs_test() + self.buildDwarf() + self.qRegisterInfo_contains_avx_registers_on_linux_x86_64() + + def qThreadInfo_contains_thread(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_threadinfo_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather threadinfo entries. 
+ threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + # We should have exactly one thread. + self.assertEqual(len(threads), 1) + + @debugserver_test + @dsym_test + def test_qThreadInfo_contains_thread_launch_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qThreadInfo_contains_thread() + + @llgs_test + @dwarf_test + def test_qThreadInfo_contains_thread_launch_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qThreadInfo_contains_thread() + + @debugserver_test + @dsym_test + def test_qThreadInfo_contains_thread_attach_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.qThreadInfo_contains_thread() + + @llgs_test + @dwarf_test + def test_qThreadInfo_contains_thread_attach_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.qThreadInfo_contains_thread() + + def qThreadInfo_matches_qC(self): + procs = self.prep_debug_monitor_and_inferior() + + self.add_threadinfo_collection_packets() + self.test_sequence.add_log_lines( + ["read packet: $qC#00", + { "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} } + ], True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather threadinfo entries. + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + # We should have exactly one thread from threadinfo. + self.assertEqual(len(threads), 1) + + # We should have a valid thread_id from $QC. + QC_thread_id_hex = context.get("thread_id") + self.assertIsNotNone(QC_thread_id_hex) + QC_thread_id = int(QC_thread_id_hex, 16) + + # Those two should be the same. 
+ self.assertEquals(threads[0], QC_thread_id) + + @debugserver_test + @dsym_test + def test_qThreadInfo_matches_qC_launch_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qThreadInfo_matches_qC() + + @llgs_test + @dwarf_test + def test_qThreadInfo_matches_qC_launch_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qThreadInfo_matches_qC() + + @debugserver_test + @dsym_test + def test_qThreadInfo_matches_qC_attach_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.qThreadInfo_matches_qC() + + @llgs_test + @dwarf_test + def test_qThreadInfo_matches_qC_attach_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.qThreadInfo_matches_qC() + + def p_returns_correct_data_size_for_each_qRegisterInfo(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.assertTrue(len(reg_infos) > 0) + + # Read value for each register. + reg_index = 0 + for reg_info in reg_infos: + # Skip registers that don't have a register set. For x86, these are + # the DRx registers, which have no LLDB-kind register number and thus + # cannot be read via normal NativeRegisterContext::ReadRegister(reg_info,...) calls. + if not "set" in reg_info: + continue + + # Clear existing packet expectations. 
+ self.reset_test_sequence() + + # Run the register query + self.test_sequence.add_log_lines( + ["read packet: $p{0:x}#00".format(reg_index), + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the response length. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + self.assertEquals(len(p_response), 2 * int(reg_info["bitsize"]) / 8) + + # Increment loop + reg_index += 1 + + @debugserver_test + @dsym_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @llgs_test + @dwarf_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @debugserver_test + @dsym_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @llgs_test + @dwarf_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + def Hg_switches_to_3_threads(self): + # Startup the inferior with three threads (main + 2 new ones). + procs = self.prep_debug_monitor_and_inferior(inferior_args=["thread:new", "thread:new"]) + + # Let the inferior process have a few moments to start up the thread when launched. (The launch scenario has no time to run, so threads won't be there yet.) 
+ self.run_process_then_stop(run_seconds=1) + + # Wait at most x seconds for 3 threads to be present. + threads = self.wait_for_thread_count(3, timeout_seconds=5) + self.assertEquals(len(threads), 3) + + # verify we can $H to each thead, and $qC matches the thread we set. + for thread in threads: + # Change to each thread, verify current thread id. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $Hg{0:x}#00".format(thread), # Set current thread. + "send packet: $OK#00", + "read packet: $qC#00", + { "direction":"send", "regex":r"^\$QC([0-9a-fA-F]+)#", "capture":{1:"thread_id"} }], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the thread id. + self.assertIsNotNone(context.get("thread_id")) + self.assertEquals(int(context.get("thread_id"), 16), thread) + + @debugserver_test + @dsym_test + def test_Hg_switches_to_3_threads_launch_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.Hg_switches_to_3_threads() + + @llgs_test + @dwarf_test + def test_Hg_switches_to_3_threads_launch_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.Hg_switches_to_3_threads() + + @debugserver_test + @dsym_test + def test_Hg_switches_to_3_threads_attach_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_attach() + self.Hg_switches_to_3_threads() + + @llgs_test + @dwarf_test + def test_Hg_switches_to_3_threads_attach_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_attach() + self.Hg_switches_to_3_threads() + + def Hc_then_Csignal_signals_correct_thread(self, segfault_signo): + # NOTE only run this one in inferior-launched mode: we can't grab inferior stdout when running attached, + # and the test requires getting stdout from the exe. 
+ + NUM_THREADS = 3 + + # Startup the inferior with three threads (main + NUM_THREADS-1 worker threads). + # inferior_args=["thread:print-ids"] + inferior_args=["thread:segfault"] + for i in range(NUM_THREADS - 1): + # if i > 0: + # Give time between thread creation/segfaulting for the handler to work. + # inferior_args.append("sleep:1") + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + + # Launch/attach. (In our case, this should only ever be launched since we need inferior stdout/stderr). + procs = self.prep_debug_monitor_and_inferior(inferior_args=inferior_args) + self.test_sequence.add_log_lines(["read packet: $c#63"], True) + context = self.expect_gdbremote_sequence() + + # Let the inferior process have a few moments to start up the thread when launched. + # context = self.run_process_then_stop(run_seconds=1) + + # Wait at most x seconds for all threads to be present. + # threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5) + # self.assertEquals(len(threads), NUM_THREADS) + + signaled_tids = {} + print_thread_ids = {} + + # Switch to each thread, deliver a signal, and verify signal delivery + for i in range(NUM_THREADS - 1): + # Run until SIGSEGV comes in. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [{"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"signo", 2:"thread_id"} } + ], True) + + context = self.expect_gdbremote_sequence(timeout_seconds=10) + self.assertIsNotNone(context) + signo = context.get("signo") + self.assertEqual(int(signo, 16), segfault_signo) + + # Ensure we haven't seen this tid yet. + thread_id = int(context.get("thread_id"), 16) + self.assertFalse(thread_id in signaled_tids) + signaled_tids[thread_id] = 1 + + # Send SIGUSR1 to the thread that signaled the SIGSEGV. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Set the continue thread. + "read packet: $Hc{0:x}#00".format(thread_id), # Set current thread. 
+ "send packet: $OK#00", + + # Continue sending the signal number to the continue thread. + # The commented out packet is a way to do this same operation without using + # a $Hc (but this test is testing $Hc, so we'll stick with the former). + "read packet: $C{0:x}#00".format(signal.SIGUSR1), + # "read packet: $vCont;C{0:x}:{1:x};c#00".format(signal.SIGUSR1, thread_id), + + # FIXME: Linux does not report the thread stop on the delivered signal (SIGUSR1 here). MacOSX debugserver does. + # But MacOSX debugserver isn't guaranteeing the thread the signal handler runs on, so currently its an XFAIL. + # Need to rectify behavior here. The linux behavior is more intuitive to me since we're essentially swapping out + # an about-to-be-delivered signal (for which we already sent a stop packet) to a different signal. + # {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, + # "read packet: $c#63", + { "type":"output_match", "regex":r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\nthread ([0-9a-fA-F]+): past SIGSEGV\r\n", "capture":{ 1:"print_thread_id", 2:"post_handle_thread_id" } }, + ], + True) + + # Run the sequence. + context = self.expect_gdbremote_sequence(timeout_seconds=10) + self.assertIsNotNone(context) + + # Ensure the stop signal is the signal we delivered. + # stop_signo = context.get("stop_signo") + # self.assertIsNotNone(stop_signo) + # self.assertEquals(int(stop_signo,16), signal.SIGUSR1) + + # Ensure the stop thread is the thread to which we delivered the signal. + # stop_thread_id = context.get("stop_thread_id") + # self.assertIsNotNone(stop_thread_id) + # self.assertEquals(int(stop_thread_id,16), thread_id) + + # Ensure we haven't seen this thread id yet. The inferior's self-obtained thread ids are not guaranteed to match the stub tids (at least on MacOSX). 
+ print_thread_id = context.get("print_thread_id") + self.assertIsNotNone(print_thread_id) + print_thread_id = int(print_thread_id, 16) + self.assertFalse(print_thread_id in print_thread_ids) + + # Now remember this print (i.e. inferior-reflected) thread id and ensure we don't hit it again. + print_thread_ids[print_thread_id] = 1 + + # Ensure post signal-handle thread id matches the thread that initially raised the SIGSEGV. + post_handle_thread_id = context.get("post_handle_thread_id") + self.assertIsNotNone(post_handle_thread_id) + post_handle_thread_id = int(post_handle_thread_id, 16) + self.assertEquals(post_handle_thread_id, print_thread_id) + + @debugserver_test + @dsym_test + @unittest2.expectedFailure() + def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + # Darwin debugserver translates some signals like SIGSEGV into some gdb expectations about fixed signal numbers. + self.Hc_then_Csignal_signals_correct_thread(self.TARGET_EXC_BAD_ACCESS) + + @llgs_test + @dwarf_test + def test_Hc_then_Csignal_signals_correct_thread_launch_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.Hc_then_Csignal_signals_correct_thread(signal.SIGSEGV) + + def m_packet_reads_memory(self): + # This is the memory we will write into the inferior and then ensure we can read back with $m. + MEMORY_CONTENTS = "Test contents 0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz" + + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["set-message:%s" % MEMORY_CONTENTS, "get-data-address-hex:g_message", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. 
+ # Note we require launch-only testing so we can get inferior otuput. + { "type":"output_match", "regex":r"^data address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"message_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the message address. + self.assertIsNotNone(context.get("message_address")) + message_address = int(context.get("message_address"), 16) + + # Grab contents from the inferior. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $m{0:x},{1:x}#00".format(message_address, len(MEMORY_CONTENTS)), + {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"read_contents"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure what we read from inferior memory is what we wrote. + self.assertIsNotNone(context.get("read_contents")) + read_contents = context.get("read_contents").decode("hex") + self.assertEquals(read_contents, MEMORY_CONTENTS) + + @debugserver_test + @dsym_test + def test_m_packet_reads_memory_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.m_packet_reads_memory() + + @llgs_test + @dwarf_test + def test_m_packet_reads_memory_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.m_packet_reads_memory() + + def qMemoryRegionInfo_is_supported(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior() + + # Ask if it supports $qMemoryRegionInfo. 
+ self.test_sequence.add_log_lines( + ["read packet: $qMemoryRegionInfo#00", + "send packet: $OK#00" + ], True) + self.expect_gdbremote_sequence() + + @debugserver_test + @dsym_test + def test_qMemoryRegionInfo_is_supported_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_is_supported() + + @llgs_test + @dwarf_test + def test_qMemoryRegionInfo_is_supported_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_is_supported() + + def qMemoryRegionInfo_reports_code_address_as_executable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-code-address-hex:hello", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"code_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the code address. + self.assertIsNotNone(context.get("code_address")) + code_address = int(context.get("code_address"), 16) + + # Grab memory region info from the inferior. + self.reset_test_sequence() + self.add_query_memory_region_packets(code_address) + + # Run the packet stream. 
+ context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure code address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("x" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. + self.assert_address_within_memory_region(code_address, mem_region_dict) + + @debugserver_test + @dsym_test + def test_qMemoryRegionInfo_reports_code_address_as_executable_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_code_address_as_executable() + + @llgs_test + @dwarf_test + def test_qMemoryRegionInfo_reports_code_address_as_executable_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_code_address_as_executable() + + def qMemoryRegionInfo_reports_stack_address_as_readable_writeable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-stack-address-hex:", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + { "type":"output_match", "regex":r"^stack address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"stack_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. 
+ {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the address. + self.assertIsNotNone(context.get("stack_address")) + stack_address = int(context.get("stack_address"), 16) + + # Grab memory region info from the inferior. + self.reset_test_sequence() + self.add_query_memory_region_packets(stack_address) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("w" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. + self.assert_address_within_memory_region(stack_address, mem_region_dict) + + @debugserver_test + @dsym_test + def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable() + + @llgs_test + @dwarf_test + def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable() + + def qMemoryRegionInfo_reports_heap_address_as_readable_writeable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-heap-address-hex:", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. 
+ "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + { "type":"output_match", "regex":r"^heap address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"heap_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the address. + self.assertIsNotNone(context.get("heap_address")) + heap_address = int(context.get("heap_address"), 16) + + # Grab memory region info from the inferior. + self.reset_test_sequence() + self.add_query_memory_region_packets(heap_address) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("w" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. 
+ self.assert_address_within_memory_region(heap_address, mem_region_dict) + + + @debugserver_test + @dsym_test + def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() + + @llgs_test + @dwarf_test + def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() + + def software_breakpoint_set_and_remove_work(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-code-address-hex:hello", "sleep:1", "call-function:hello"]) + + # Run the process + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + self.test_sequence.add_log_lines( + [# Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the function call entry point. + # Note we require launch-only testing so we can get inferior otuput. + { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"function_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info - we need endian of target to handle register value conversions. + process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Gather register info entries. 
+ reg_infos = self.parse_register_info_packets(context) + (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos) + self.assertIsNotNone(pc_lldb_reg_index) + self.assertIsNotNone(pc_reg_info) + + # Grab the function address. + self.assertIsNotNone(context.get("function_address")) + function_address = int(context.get("function_address"), 16) + + # Set the breakpoint. + # Note this might need to be switched per platform (ARM, mips, etc.). + BREAKPOINT_KIND = 1 + self.reset_test_sequence() + self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the stop signal reported was the breakpoint signal number. + stop_signo = context.get("stop_signo") + self.assertIsNotNone(stop_signo) + self.assertEquals(int(stop_signo,16), signal.SIGTRAP) + + # Ensure we did not receive any output. If the breakpoint was not set, we would + # see output (from a launched process with captured stdio) printing a hello, world message. + # That would indicate the breakpoint didn't take. + self.assertEquals(len(context["O_content"]), 0) + + # Verify that the PC for the main thread is where we expect it - right at the breakpoint address. + # This acts as a another validation on the register reading code. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Print the PC. This should match the breakpoint address. + "read packet: $p{0:x}#00".format(pc_lldb_reg_index), + # Capture $p results. + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the PC is where we expect. Note response is in endianness of the inferior. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + + # Convert from target endian to int. 
+ returned_pc = lldbgdbserverutils.unpack_register_hex_unsigned(endian, p_response) + self.assertEquals(returned_pc, function_address) + + # Verify that a breakpoint remove and continue gets us the expected output. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Remove the breakpoint. + "read packet: $z0,{0:x},{1}#00".format(function_address, BREAKPOINT_KIND), + # Verify the stub could unset it. + "send packet: $OK#00", + # Continue running. + "read packet: $c#63", + # We should now receive the output from the call. + { "type":"output_match", "regex":r"^hello, world\r\n$" }, + # And wait for program completion. + {"direction":"send", "regex":r"^\$W00(.*)#[0-9a-fA-F]{2}$" }, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @dsym_test + def test_software_breakpoint_set_and_remove_work_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.software_breakpoint_set_and_remove_work() + + @llgs_test + @dwarf_test + def test_software_breakpoint_set_and_remove_work_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.software_breakpoint_set_and_remove_work() + + def qSupported_returns_known_stub_features(self): + # Start up the stub and start/prep the inferior. + procs = self.prep_debug_monitor_and_inferior() + self.add_qSupported_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Retrieve the qSupported features. 
+ supported_dict = self.parse_qSupported_response(context) + self.assertIsNotNone(supported_dict) + self.assertTrue(len(supported_dict) > 0) + + @debugserver_test + @dsym_test + def test_qSupported_returns_known_stub_features_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.qSupported_returns_known_stub_features() + + @llgs_test + @dwarf_test + def test_qSupported_returns_known_stub_features_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.qSupported_returns_known_stub_features() + + def written_M_content_reads_back_correctly(self): + TEST_MESSAGE = "Hello, memory" + + # Start up the stub and start/prep the inferior. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["set-message:xxxxxxxxxxxxxX", "get-data-address-hex:g_message", "sleep:1", "print-message:"]) + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + { "type":"output_match", "regex":r"^data address: 0x([0-9a-fA-F]+)\r\n$", "capture":{ 1:"message_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the message address. + self.assertIsNotNone(context.get("message_address")) + message_address = int(context.get("message_address"), 16) + + # Hex-encode the test message, adding null termination. + hex_encoded_message = TEST_MESSAGE.encode("hex") + + # Write the message to the inferior. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $M{0:x},{1:x}:{2}#00".format(message_address, len(hex_encoded_message)/2, hex_encoded_message), + "send packet: $OK#00", + "read packet: $c#63", + { "type":"output_match", "regex":r"^message: (.+)\r\n$", "capture":{ 1:"printed_message"} }, + "send packet: $W00#00", + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure what we read from inferior memory is what we wrote. + printed_message = context.get("printed_message") + self.assertIsNotNone(printed_message) + self.assertEquals(printed_message, TEST_MESSAGE + "X") + + @debugserver_test + @dsym_test + def test_written_M_content_reads_back_correctly_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.written_M_content_reads_back_correctly() + + @llgs_test + @dwarf_test + def test_written_M_content_reads_back_correctly_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.written_M_content_reads_back_correctly() + + def P_writes_all_gpr_registers(self): + # Start inferior debug session, grab all register info. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Process register infos. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + # Process endian. + process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Pull out the register infos that we think we can bit flip successfully,. 
+ gpr_reg_infos = [reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] + self.assertTrue(len(gpr_reg_infos) > 0) + + # Write flipped bit pattern of existing value to each register. + (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(gpr_reg_infos, endian) + # print "successful writes: {}, failed writes: {}".format(successful_writes, failed_writes) + self.assertTrue(successful_writes > 0) + + # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp). + # Come back to this. I have the test rigged to verify that at least some of the bit-flip writes work. + @debugserver_test + @dsym_test + def test_P_writes_all_gpr_registers_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.P_writes_all_gpr_registers() + + @llgs_test + @dwarf_test + def test_P_writes_all_gpr_registers_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.P_writes_all_gpr_registers() + + def P_and_p_thread_suffix_work(self): + # Startup the inferior with three threads. 
+ procs = self.prep_debug_monitor_and_inferior(inferior_args=["thread:new", "thread:new"]) + self.add_thread_suffix_request_packets() + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + reg_index = self.select_modifiable_register(reg_infos) + self.assertIsNotNone(reg_index) + reg_byte_size = int(reg_infos[reg_index]["bitsize"]) / 8 + self.assertTrue(reg_byte_size > 0) + + # Run the process a bit so threads can start up, and collect register info. + context = self.run_process_then_stop(run_seconds=1) + self.assertIsNotNone(context) + + # Wait for 3 threads to be present. + threads = self.wait_for_thread_count(3, timeout_seconds=5) + self.assertEquals(len(threads), 3) + + expected_reg_values = [] + register_increment = 1 + next_value = None + + # Set the same register in each of 3 threads to a different value. + # Verify each one has the unique value. + for thread in threads: + # If we don't have a next value yet, start it with the initial read value + 1 + if not next_value: + # Read pre-existing register value. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Set the next value to use for writing as the increment plus current value. 
+ p_response = context.get("p_response") + self.assertIsNotNone(p_response) + next_value = lldbgdbserverutils.unpack_register_hex_unsigned(endian, p_response) + + # Set new value using P and thread suffix. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $P{0:x}={1};thread:{2:x}#00".format(reg_index, lldbgdbserverutils.pack_register_hex(endian, next_value, byte_size=reg_byte_size), thread), + "send packet: $OK#00", + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Save the value we set. + expected_reg_values.append(next_value) + + # Increment value for next thread to use (we want them all different so we can verify they wrote to each thread correctly next.) + next_value += register_increment + + # Revisit each thread and verify they have the expected value set for the register we wrote. + thread_index = 0 + for thread in threads: + # Read pre-existing register value. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Get the register value. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + read_value = lldbgdbserverutils.unpack_register_hex_unsigned(endian, p_response) + + # Make sure we read back what we wrote. + self.assertEquals(read_value, expected_reg_values[thread_index]) + thread_index += 1 + + # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp). 
+ @debugserver_test + @dsym_test + def test_P_and_p_thread_suffix_work_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.set_inferior_startup_launch() + self.P_and_p_thread_suffix_work() + + @llgs_test + @dwarf_test + def test_P_and_p_thread_suffix_work_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.set_inferior_startup_launch() + self.P_and_p_thread_suffix_work() + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/commandline/TestStubReverseConnect.py =================================================================== --- /dev/null +++ test/tools/lldb-server/commandline/TestStubReverseConnect.py @@ -0,0 +1,86 @@ +# Add the directory above ours to the python library path since we +# will import from there. +import os.path +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +import gdbremote_testcase +import re +import select +import socket +import time +from lldbtest import * + +class TestStubReverseConnect(gdbremote_testcase.GdbRemoteTestCaseBase): + _DEFAULT_TIMEOUT = 20 + + def setUp(self): + # Set up the test. + gdbremote_testcase.GdbRemoteTestCaseBase.setUp(self) + + # Create a listener on a local port. + self.listener_socket = self.create_listener_socket() + self.assertIsNotNone(self.listener_socket) + self.listener_port = self.listener_socket.getsockname()[1] + + def create_listener_socket(self, timeout_seconds=_DEFAULT_TIMEOUT): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.assertIsNotNone(sock) + + sock.settimeout(timeout_seconds) + sock.bind(("127.0.0.1",0)) + sock.listen(1) + + def tear_down_listener(): + try: + sock.shutdown(socket.SHUT_RDWR) + except: + # ignore + None + + self.addTearDownHook(tear_down_listener) + return sock + + def reverse_connect_works(self): + # Indicate stub startup should do a reverse connect. 
+ appended_stub_args = " --reverse-connect" + if self.debug_monitor_extra_args: + self.debug_monitor_extra_args += appended_stub_args + else: + self.debug_monitor_extra_args = appended_stub_args + + self.stub_hostname = "127.0.0.1" + self.port = self.listener_port + + # Start the stub. + server = self.launch_debug_monitor(logfile=sys.stdout) + self.assertIsNotNone(server) + self.assertTrue(server.isalive()) + + # Listen for the stub's connection to us. + (stub_socket, address) = self.listener_socket.accept() + self.assertIsNotNone(stub_socket) + self.assertIsNotNone(address) + print "connected to stub {} on {}".format(address, stub_socket.getsockname()) + + # Verify we can do the handshake. If that works, we'll call it good. + self.do_handshake(stub_socket, timeout_seconds=self._DEFAULT_TIMEOUT) + + # Clean up. + stub_socket.shutdown(socket.SHUT_RDWR) + + @debugserver_test + def test_reverse_connect_works_debugserver(self): + self.init_debugserver_test(use_named_pipe=False) + self.set_inferior_startup_launch() + self.reverse_connect_works() + + @llgs_test + def test_reverse_connect_works_llgs(self): + self.init_llgs_test(use_named_pipe=False) + self.set_inferior_startup_launch() + self.reverse_connect_works() + + +if __name__ == '__main__': + unittest2.main() Index: test/tools/lldb-server/commandline/TestStubSetSID.py =================================================================== --- /dev/null +++ test/tools/lldb-server/commandline/TestStubSetSID.py @@ -0,0 +1,85 @@ +import unittest2 + +# Add the directory above ours to the python library path since we +# will import from there. 
+import os.path +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +import gdbremote_testcase +import os +import select +import tempfile +import time +from lldbtest import * + + +def get_common_stub_args(): + return [] if 'darwin' in sys.platform else ['g'] + + +class TestStubSetSIDTestCase(gdbremote_testcase.GdbRemoteTestCaseBase): + def get_stub_sid(self, extra_stub_args=None): + # Launch debugserver + if extra_stub_args: + self.debug_monitor_extra_args = extra_stub_args + else: + self.debug_monitor_extra_args = "" + + server = self.launch_debug_monitor() + self.assertIsNotNone(server) + self.assertTrue(server.isalive()) + server.expect("(debugserver|lldb-gdbserver)", timeout=10) + + # Get the process id for the stub. + return os.getsid(server.pid) + + def sid_is_same_without_setsid(self): + stub_sid = self.get_stub_sid() + self.assertEquals(stub_sid, os.getsid(0)) + + def sid_is_different_with_setsid(self): + stub_sid = self.get_stub_sid(" %s --setsid" % ' '.join(get_common_stub_args())) + self.assertNotEquals(stub_sid, os.getsid(0)) + + def sid_is_different_with_S(self): + stub_sid = self.get_stub_sid(" %s -S" % ' '.join(get_common_stub_args())) + self.assertNotEquals(stub_sid, os.getsid(0)) + + @debugserver_test + @unittest2.expectedFailure() # This is the whole purpose of this feature, I would expect it to be the same without --setsid. Investigate. + def test_sid_is_same_without_setsid_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_same_without_setsid() + + @llgs_test + @unittest2.expectedFailure() # This is the whole purpose of this feature, I would expect it to be the same without --setsid. Investigate. 
+ def test_sid_is_same_without_setsid_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_same_without_setsid() + + @debugserver_test + def test_sid_is_different_with_setsid_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_setsid() + + @llgs_test + def test_sid_is_different_with_setsid_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_setsid() + + @debugserver_test + def test_sid_is_different_with_S_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_S() + + @llgs_test + def test_sid_is_different_with_S_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_S() Index: test/tools/lldb-server/gdbremote_testcase.py =================================================================== --- /dev/null +++ test/tools/lldb-server/gdbremote_testcase.py @@ -0,0 +1,1208 @@ +""" +Base class for gdb-remote test cases. +""" + +import errno +import os +import os.path +import platform +import random +import re +import select +import sets +import signal +import socket +import subprocess +import sys +import tempfile +import time +import unittest2 +from lldbtest import * +from lldbgdbserverutils import * +import logging + +class GdbRemoteTestCaseBase(TestBase): + + mydir = TestBase.compute_mydir(__file__) + + _TIMEOUT_SECONDS = 5 + + _GDBREMOTE_KILL_PACKET = "$k#6b" + + _LOGGING_LEVEL = logging.WARNING + # _LOGGING_LEVEL = logging.DEBUG + + # Start the inferior separately, attach to the inferior on the stub command line. + _STARTUP_ATTACH = "attach" + # Start the inferior separately, start the stub without attaching, allow the test to attach to the inferior however it wants (e.g. $vAttach;pid). 
+ _STARTUP_ATTACH_MANUALLY = "attach_manually" + # Start the stub, and launch the inferior with an $A packet via the initial packet stream. + _STARTUP_LAUNCH = "launch" + + # GDB Signal numbers that are not target-specific used for common exceptions + TARGET_EXC_BAD_ACCESS = 0x91 + TARGET_EXC_BAD_INSTRUCTION = 0x92 + TARGET_EXC_ARITHMETIC = 0x93 + TARGET_EXC_EMULATION = 0x94 + TARGET_EXC_SOFTWARE = 0x95 + TARGET_EXC_BREAKPOINT = 0x96 + + def setUp(self): + TestBase.setUp(self) + FORMAT = '%(asctime)-15s %(levelname)-8s %(message)s' + logging.basicConfig(format=FORMAT) + self.logger = logging.getLogger(__name__) + self.logger.setLevel(self._LOGGING_LEVEL) + self.test_sequence = GdbRemoteTestSequence(self.logger) + self.set_inferior_startup_launch() + self.port = self.get_next_port() + self.named_pipe_path = None + self.named_pipe = None + self.named_pipe_fd = None + self.stub_sends_two_stop_notifications_on_kill = False + self.stub_hostname = "localhost" + + def get_next_port(self): + return 12000 + random.randint(0,3999) + + def reset_test_sequence(self): + self.test_sequence = GdbRemoteTestSequence(self.logger) + + def create_named_pipe(self): + # Create a temp dir and name for a pipe. + temp_dir = tempfile.mkdtemp() + named_pipe_path = os.path.join(temp_dir, "stub_port_number") + + # Create the named pipe. + os.mkfifo(named_pipe_path) + + # Open the read side of the pipe in non-blocking mode. This will return right away, ready or not. + named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK) + + # Create the file for the named pipe. Note this will follow semantics of + # a non-blocking read side of a named pipe, which has different semantics + # than a named pipe opened for read in non-blocking mode. + named_pipe = os.fdopen(named_pipe_fd, "r") + self.assertIsNotNone(named_pipe) + + def shutdown_named_pipe(): + # Close the pipe. + try: + named_pipe.close() + except: + print "failed to close named pipe" + None + + # Delete the pipe. 
+ try: + os.remove(named_pipe_path) + except: + print "failed to delete named pipe: {}".format(named_pipe_path) + None + + # Delete the temp directory. + try: + os.rmdir(temp_dir) + except: + print "failed to delete temp dir: {}, directory contents: '{}'".format(temp_dir, os.listdir(temp_dir)) + None + + # Add the shutdown hook to clean up the named pipe. + self.addTearDownHook(shutdown_named_pipe) + + # Clear the port so the stub selects a port number. + self.port = 0 + + return (named_pipe_path, named_pipe, named_pipe_fd) + + def get_stub_port_from_named_socket(self, read_timeout_seconds=5): + # Wait for something to read with a max timeout. + (ready_readers, _, _) = select.select([self.named_pipe_fd], [], [], read_timeout_seconds) + self.assertIsNotNone(ready_readers, "write side of pipe has not written anything - stub isn't writing to pipe.") + self.assertNotEqual(len(ready_readers), 0, "write side of pipe has not written anything - stub isn't writing to pipe.") + + # Read the port from the named pipe. + stub_port_raw = self.named_pipe.read() + self.assertIsNotNone(stub_port_raw) + self.assertNotEqual(len(stub_port_raw), 0, "no content to read on pipe") + + # Trim null byte, convert to int. 
+ stub_port_raw = stub_port_raw[:-1] + stub_port = int(stub_port_raw) + self.assertTrue(stub_port > 0) + + return stub_port + + def init_llgs_test(self, use_named_pipe=True): + self.debug_monitor_exe = get_lldb_gdbserver_exe() + if not self.debug_monitor_exe: + self.skipTest("lldb_gdbserver exe not found") + dname = os.path.join(os.environ["LLDB_TEST"], + os.environ["LLDB_SESSION_DIRNAME"]) + self.debug_monitor_extra_args = " gdbserver -c 'log enable -T -f {}/process-{}.log lldb break process thread' -c 'log enable -T -f {}/packets-{}.log gdb-remote packets'".format(dname, self.id(), dname, self.id()) + if use_named_pipe: + (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe() + + def init_debugserver_test(self, use_named_pipe=True): + self.debug_monitor_exe = get_debugserver_exe() + if not self.debug_monitor_exe: + self.skipTest("debugserver exe not found") + self.debug_monitor_extra_args = " --log-file=/tmp/packets-{}.log --log-flags=0x800000".format(self._testMethodName) + if use_named_pipe: + (self.named_pipe_path, self.named_pipe, self.named_pipe_fd) = self.create_named_pipe() + # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification + # when the process truly dies. 
+ self.stub_sends_two_stop_notifications_on_kill = True + + def create_socket(self): + sock = socket.socket() + logger = self.logger + + def shutdown_socket(): + if sock: + try: + # send the kill packet so lldb-gdbserver shuts down gracefully + sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET) + except: + logger.warning("failed to send kill packet to debug monitor: {}; ignoring".format(sys.exc_info()[0])) + + try: + sock.close() + except: + logger.warning("failed to close socket to debug monitor: {}; ignoring".format(sys.exc_info()[0])) + + self.addTearDownHook(shutdown_socket) + + connect_info = (self.stub_hostname, self.port) + # print "connecting to stub on {}:{}".format(connect_info[0], connect_info[1]) + sock.connect(connect_info) + + return sock + + def set_inferior_startup_launch(self): + self._inferior_startup = self._STARTUP_LAUNCH + + def set_inferior_startup_attach(self): + self._inferior_startup = self._STARTUP_ATTACH + + def set_inferior_startup_attach_manually(self): + self._inferior_startup = self._STARTUP_ATTACH_MANUALLY + + def get_debug_monitor_command_line(self, attach_pid=None): + commandline = "{}{} localhost:{}".format(self.debug_monitor_exe, self.debug_monitor_extra_args, self.port) + if attach_pid: + commandline += " --attach=%d" % attach_pid + if self.named_pipe_path: + commandline += " --named-pipe %s" % self.named_pipe_path + return commandline + + def launch_debug_monitor(self, attach_pid=None, logfile=None): + # Create the command line. + import pexpect + commandline = self.get_debug_monitor_command_line(attach_pid=attach_pid) + + # Start the server. + server = pexpect.spawn(commandline, logfile=logfile) + self.assertIsNotNone(server) + server.expect(r"(debugserver|lldb-gdbserver)", timeout=10) + + # If we're receiving the stub's listening port from the named pipe, do that here. 
+ if self.named_pipe: + self.port = self.get_stub_port_from_named_socket() + # print "debug server listening on {}".format(self.port) + + # Turn on logging for what the child sends back. + if self.TraceOn(): + server.logfile_read = sys.stdout + + return server + + def connect_to_debug_monitor(self, attach_pid=None): + if self.named_pipe: + # Create the stub. + server = self.launch_debug_monitor(attach_pid=attach_pid) + self.assertIsNotNone(server) + + def shutdown_debug_monitor(): + try: + server.close() + except: + logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0])) + self.addTearDownHook(shutdown_debug_monitor) + + # Schedule debug monitor to be shut down during teardown. + logger = self.logger + + # Attach to the stub and return a socket opened to it. + self.sock = self.create_socket() + return server + + # We're using a random port algorithm to try not to collide with other ports, + # and retry a max # times. + attempts = 0 + MAX_ATTEMPTS = 20 + + while attempts < MAX_ATTEMPTS: + server = self.launch_debug_monitor(attach_pid=attach_pid) + + # Wait until we receive the server ready message before continuing. + port_good = True + try: + server.expect_exact('Listening to port {} for a connection from localhost'.format(self.port)) + except: + port_good = False + server.close() + + if port_good: + # Schedule debug monitor to be shut down during teardown. + logger = self.logger + def shutdown_debug_monitor(): + try: + server.close() + except: + logger.warning("failed to close pexpect server for debug monitor: {}; ignoring".format(sys.exc_info()[0])) + self.addTearDownHook(shutdown_debug_monitor) + + # Create a socket to talk to the server + try: + self.sock = self.create_socket() + return server + except socket.error as serr: + # We're only trying to handle connection refused. + if serr.errno != errno.ECONNREFUSED: + raise serr + # We should close the server here to be safe. 
+ server.close() + + # Increment attempts. + print("connect to debug monitor on port %d failed, attempt #%d of %d" % (self.port, attempts + 1, MAX_ATTEMPTS)) + attempts += 1 + + # And wait a random length of time before next attempt, to avoid collisions. + time.sleep(random.randint(1,5)) + + # Now grab a new port number. + self.port = self.get_next_port() + + raise Exception("failed to create a socket to the launched debug monitor after %d tries" % attempts) + + def launch_process_for_attach(self,inferior_args=None, sleep_seconds=3, exe_path=None): + # We're going to start a child process that the debug monitor stub can later attach to. + # This process needs to be started so that it just hangs around for a while. We'll + # have it sleep. + if not exe_path: + exe_path = os.path.abspath("a.out") + + args = [exe_path] + if inferior_args: + args.extend(inferior_args) + if sleep_seconds: + args.append("sleep:%d" % sleep_seconds) + + return subprocess.Popen(args) + + def prep_debug_monitor_and_inferior(self, inferior_args=None, inferior_sleep_seconds=3, inferior_exe_path=None): + """Prep the debug monitor, the inferior, and the expected packet stream. + + Handle the separate cases of using the debug monitor in attach-to-inferior mode + and in launch-inferior mode. + + For attach-to-inferior mode, the inferior process is first started, then + the debug monitor is started in attach to pid mode (using --attach on the + stub command line), and the no-ack-mode setup is appended to the packet + stream. The packet stream is not yet executed, ready to have more expected + packet entries added to it. + + For launch-inferior mode, the stub is first started, then no ack mode is + setup on the expected packet stream, then the verified launch packets are added + to the expected socket stream. The packet stream is not yet executed, ready + to have more expected packet entries added to it. 
+ + The return value is: + {inferior:, server:} + """ + inferior = None + attach_pid = None + + if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY: + # Launch the process that we'll use as the inferior. + inferior = self.launch_process_for_attach(inferior_args=inferior_args, sleep_seconds=inferior_sleep_seconds, exe_path=inferior_exe_path) + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + if self._inferior_startup == self._STARTUP_ATTACH: + # In this case, we want the stub to attach via the command line, so set the command line attach pid here. + attach_pid = inferior.pid + + # Launch the debug monitor stub, attaching to the inferior. + server = self.connect_to_debug_monitor(attach_pid=attach_pid) + self.assertIsNotNone(server) + + if self._inferior_startup == self._STARTUP_LAUNCH: + # Build launch args + if not inferior_exe_path: + inferior_exe_path = os.path.abspath("a.out") + launch_args = [inferior_exe_path] + if inferior_args: + launch_args.extend(inferior_args) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + if self._inferior_startup == self._STARTUP_LAUNCH: + self.add_verified_launch_packets(launch_args) + + return {"inferior":inferior, "server":server} + + def expect_socket_recv(self, sock, expected_content_regex, timeout_seconds): + response = "" + timeout_time = time.time() + timeout_seconds + + while not expected_content_regex.match(response) and time.time() < timeout_time: + can_read, _, _ = select.select([sock], [], [], timeout_seconds) + if can_read and sock in can_read: + recv_bytes = sock.recv(4096) + if recv_bytes: + response += recv_bytes + + self.assertTrue(expected_content_regex.match(response)) + + def expect_socket_send(self, sock, content, timeout_seconds): + request_bytes_remaining = content + timeout_time = time.time() + timeout_seconds + + while len(request_bytes_remaining) > 0 and time.time() < timeout_time: + _, can_write, _ = 
select.select([], [sock], [], timeout_seconds) + if can_write and sock in can_write: + written_byte_count = sock.send(request_bytes_remaining) + request_bytes_remaining = request_bytes_remaining[written_byte_count:] + self.assertEquals(len(request_bytes_remaining), 0) + + def do_handshake(self, stub_socket, timeout_seconds=5): + # Write the ack. + self.expect_socket_send(stub_socket, "+", timeout_seconds) + + # Send the start no ack mode packet. + NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0" + bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST) + self.assertEquals(bytes_sent, len(NO_ACK_MODE_REQUEST)) + + # Receive the ack and "OK" + self.expect_socket_recv(stub_socket, re.compile(r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds) + + # Send the final ack. + self.expect_socket_send(stub_socket, "+", timeout_seconds) + + def add_no_ack_remote_stream(self): + self.test_sequence.add_log_lines( + ["read packet: +", + "read packet: $QStartNoAckMode#b0", + "send packet: +", + "send packet: $OK#9a", + "read packet: +"], + True) + + def add_verified_launch_packets(self, launch_args): + self.test_sequence.add_log_lines( + ["read packet: %s" % build_gdbremote_A_packet(launch_args), + "send packet: $OK#00", + "read packet: $qLaunchSuccess#a5", + "send packet: $OK#00"], + True) + + def add_thread_suffix_request_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $QThreadSuffixSupported#e4", + "send packet: $OK#00", + ], True) + + def add_process_info_collection_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qProcessInfo#dc", + { "direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"process_info_raw"} }], + True) + + _KNOWN_PROCESS_INFO_KEYS = [ + "pid", + "parent-pid", + "real-uid", + "real-gid", + "effective-uid", + "effective-gid", + "cputype", + "cpusubtype", + "ostype", + "triple", + "vendor", + "endian", + "ptrsize" + ] + + def parse_process_info_response(self, context): + # Ensure we have a process info response. 
+ self.assertIsNotNone(context) + process_info_raw = context.get("process_info_raw") + self.assertIsNotNone(process_info_raw) + + # Pull out key:value; pairs. + process_info_dict = { match.group(1):match.group(2) for match in re.finditer(r"([^:]+):([^;]+);", process_info_raw) } + + # Validate keys are known. + for (key, val) in process_info_dict.items(): + self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS) + self.assertIsNotNone(val) + + return process_info_dict + + def add_register_info_collection_packets(self): + self.test_sequence.add_log_lines( + [ { "type":"multi_response", "query":"qRegisterInfo", "append_iteration_suffix":True, + "end_regex":re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"), + "save_key":"reg_info_responses" } ], + True) + + def parse_register_info_packets(self, context): + """Return an array of register info dictionaries, one per register info.""" + reg_info_responses = context.get("reg_info_responses") + self.assertIsNotNone(reg_info_responses) + + # Parse register infos. + return [parse_reg_info_response(reg_info_response) for reg_info_response in reg_info_responses] + + def expect_gdbremote_sequence(self, timeout_seconds=None): + if not timeout_seconds: + timeout_seconds = self._TIMEOUT_SECONDS + return expect_lldb_gdbserver_replay(self, self.sock, self.test_sequence, timeout_seconds, self.logger) + + _KNOWN_REGINFO_KEYS = [ + "name", + "alt-name", + "bitsize", + "offset", + "encoding", + "format", + "set", + "gcc", + "dwarf", + "generic", + "container-regs", + "invalidate-regs" + ] + + def assert_valid_reg_info(self, reg_info): + # Assert we know about all the reginfo keys parsed. + for key in reg_info: + self.assertTrue(key in self._KNOWN_REGINFO_KEYS) + + # Check the bare-minimum expected set of register info keys. 
+ self.assertTrue("name" in reg_info) + self.assertTrue("bitsize" in reg_info) + self.assertTrue("offset" in reg_info) + self.assertTrue("encoding" in reg_info) + self.assertTrue("format" in reg_info) + + def find_pc_reg_info(self, reg_infos): + lldb_reg_index = 0 + for reg_info in reg_infos: + if ("generic" in reg_info) and (reg_info["generic"] == "pc"): + return (lldb_reg_index, reg_info) + lldb_reg_index += 1 + + return (None, None) + + def add_lldb_register_index(self, reg_infos): + """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry. + + We'll use this when we want to call packets like P/p with a register index but do so + on only a subset of the full register info set. + """ + self.assertIsNotNone(reg_infos) + + reg_index = 0 + for reg_info in reg_infos: + reg_info["lldb_register_index"] = reg_index + reg_index += 1 + + def add_query_memory_region_packets(self, address): + self.test_sequence.add_log_lines( + ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address), + {"direction":"send", "regex":r"^\$(.+)#[0-9a-fA-F]{2}$", "capture":{1:"memory_region_response"} }], + True) + + def parse_key_val_dict(self, key_val_text, allow_dupes=True): + self.assertIsNotNone(key_val_text) + kv_dict = {} + for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text): + key = match.group(1) + val = match.group(2) + if key in kv_dict: + if allow_dupes: + if type(kv_dict[key]) == list: + kv_dict[key].append(val) + else: + # Promote to list + kv_dict[key] = [kv_dict[key], val] + else: + self.fail("key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(key, val, key_val_text, kv_dict)) + else: + kv_dict[key] = val + return kv_dict + + def parse_memory_region_packet(self, context): + # Ensure we have a context. + self.assertIsNotNone(context.get("memory_region_response")) + + # Pull out key:value; pairs. 
+ mem_region_dict = self.parse_key_val_dict(context.get("memory_region_response")) + + # Validate keys are known. + for (key, val) in mem_region_dict.items(): + self.assertTrue(key in ["start", "size", "permissions", "error"]) + self.assertIsNotNone(val) + + # Return the dictionary of key-value pairs for the memory region. + return mem_region_dict + + def assert_address_within_memory_region(self, test_address, mem_region_dict): + self.assertIsNotNone(mem_region_dict) + self.assertTrue("start" in mem_region_dict) + self.assertTrue("size" in mem_region_dict) + + range_start = int(mem_region_dict["start"], 16) + range_size = int(mem_region_dict["size"], 16) + range_end = range_start + range_size + + if test_address < range_start: + self.fail("address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) + elif test_address >= range_end: + self.fail("address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(test_address, range_start, range_end, range_size)) + + def add_threadinfo_collection_packets(self): + self.test_sequence.add_log_lines( + [ { "type":"multi_response", "first_query":"qfThreadInfo", "next_query":"qsThreadInfo", + "append_iteration_suffix":False, "end_regex":re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"), + "save_key":"threadinfo_responses" } ], + True) + + def parse_threadinfo_packets(self, context): + """Return an array of thread ids (decimal ints), one per thread.""" + threadinfo_responses = context.get("threadinfo_responses") + self.assertIsNotNone(threadinfo_responses) + + thread_ids = [] + for threadinfo_response in threadinfo_responses: + new_thread_infos = parse_threadinfo_response(threadinfo_response) + thread_ids.extend(new_thread_infos) + return thread_ids + + def wait_for_thread_count(self, thread_count, timeout_seconds=3): + start_time = time.time() + timeout_time = start_time + timeout_seconds + + actual_thread_count = 0 + while actual_thread_count < thread_count: + 
self.reset_test_sequence() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + actual_thread_count = len(threads) + + if time.time() > timeout_time: + raise Exception( + 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format( + timeout_seconds, thread_count, actual_thread_count)) + + return threads + + def add_set_breakpoint_packets(self, address, do_continue=True, breakpoint_kind=1): + self.test_sequence.add_log_lines( + [# Set the breakpoint. + "read packet: $Z0,{0:x},{1}#00".format(address, breakpoint_kind), + # Verify the stub could set it. + "send packet: $OK#00", + ], True) + + if (do_continue): + self.test_sequence.add_log_lines( + [# Continue the inferior. + "read packet: $c#63", + # Expect a breakpoint stop report. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, + ], True) + + def add_remove_breakpoint_packets(self, address, breakpoint_kind=1): + self.test_sequence.add_log_lines( + [# Remove the breakpoint. + "read packet: $z0,{0:x},{1}#00".format(address, breakpoint_kind), + # Verify the stub could unset it. 
+ "send packet: $OK#00", + ], True) + + def add_qSupported_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qSupported#00", + {"direction":"send", "regex":r"^\$(.*)#[0-9a-fA-F]{2}", "capture":{1: "qSupported_response"}}, + ], True) + + _KNOWN_QSUPPORTED_STUB_FEATURES = [ + "augmented-libraries-svr4-read", + "PacketSize", + "QStartNoAckMode", + "QThreadSuffixSupported", + "QListThreadsInStopReply", + "qXfer:auxv:read", + "qXfer:libraries:read", + "qXfer:libraries-svr4:read", + ] + + def parse_qSupported_response(self, context): + self.assertIsNotNone(context) + + raw_response = context.get("qSupported_response") + self.assertIsNotNone(raw_response) + + # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the + # +,-,? is stripped from the key and set as the value. + supported_dict = {} + for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response): + key = match.group(1) + val = match.group(3) + + # key=val: store as is + if val and len(val) > 0: + supported_dict[key] = val + else: + if len(key) < 2: + raise Exception("singular stub feature is too short: must be stub_feature{+,-,?}") + supported_type = key[-1] + key = key[:-1] + if not supported_type in ["+", "-", "?"]: + raise Exception("malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type)) + supported_dict[key] = supported_type + # Ensure we know the supported element + if not key in self._KNOWN_QSUPPORTED_STUB_FEATURES: + raise Exception("unknown qSupported stub feature reported: %s" % key) + + return supported_dict + + def run_process_then_stop(self, run_seconds=1): + # Tell the stub to continue. + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8"], + True) + context = self.expect_gdbremote_sequence() + + # Wait for run_seconds. + time.sleep(run_seconds) + + # Send an interrupt, capture a T response. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: {}".format(chr(03)), + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture":{1:"stop_result"} }], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + self.assertIsNotNone(context.get("stop_result")) + + return context + + def select_modifiable_register(self, reg_infos): + """Find a register that can be read/written freely.""" + PREFERRED_REGISTER_NAMES = sets.Set(["rax",]) + + # First check for the first register from the preferred register name set. + alternative_register_index = None + + self.assertIsNotNone(reg_infos) + for reg_info in reg_infos: + if ("name" in reg_info) and (reg_info["name"] in PREFERRED_REGISTER_NAMES): + # We found a preferred register. Use it. + return reg_info["lldb_register_index"] + if ("generic" in reg_info) and (reg_info["generic"] == "fp"): + # A frame pointer register will do as a register to modify temporarily. + alternative_register_index = reg_info["lldb_register_index"] + + # We didn't find a preferred register. Return whatever alternative register + # we found, if any. 
# Chop off value.
# Send the interrupt.
+ {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", "capture":{1:"stop_signo", 2:"stop_key_val_text" } }, + ], True) + + def parse_interrupt_packets(self, context): + self.assertIsNotNone(context.get("stop_signo")) + self.assertIsNotNone(context.get("stop_key_val_text")) + return (int(context["stop_signo"], 16), self.parse_key_val_dict(context["stop_key_val_text"])) + + def add_QSaveRegisterState_packets(self, thread_id): + if thread_id: + # Use the thread suffix form. + request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(thread_id) + else: + request = "read packet: $QSaveRegisterState#00" + + self.test_sequence.add_log_lines([ + request, + {"direction":"send", "regex":r"^\$(E?.*)#[0-9a-fA-F]{2}$", "capture":{1:"save_response" } }, + ], True) + + def parse_QSaveRegisterState_response(self, context): + self.assertIsNotNone(context) + + save_response = context.get("save_response") + self.assertIsNotNone(save_response) + + if len(save_response) < 1 or save_response[0] == "E": + # error received + return (False, None) + else: + return (True, int(save_response)) + + def add_QRestoreRegisterState_packets(self, save_id, thread_id=None): + if thread_id: + # Use the thread suffix form. + request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(save_id, thread_id) + else: + request = "read packet: $QRestoreRegisterState:{}#00".format(save_id) + + self.test_sequence.add_log_lines([ + request, + "send packet: $OK#00" + ], True) + + def flip_all_bits_in_each_register_value(self, reg_infos, endian, thread_id=None): + self.assertIsNotNone(reg_infos) + + successful_writes = 0 + failed_writes = 0 + + for reg_info in reg_infos: + # Use the lldb register index added to the reg info. We're not necessarily + # working off a full set of register infos, so an inferred register index could be wrong. 
+ reg_index = reg_info["lldb_register_index"] + self.assertIsNotNone(reg_index) + + reg_byte_size = int(reg_info["bitsize"])/8 + self.assertTrue(reg_byte_size > 0) + + # Handle thread suffix. + if thread_id: + p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id) + else: + p_request = "read packet: $p{:x}#00".format(reg_index) + + # Read the existing value. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the response length. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + initial_reg_value = unpack_register_hex_unsigned(endian, p_response) + + # Flip the value by xoring with all 1s + all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8) + flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16) + # print "reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int) + + # Handle thread suffix for P. + if thread_id: + P_request = "read packet: $P{:x}={};thread:{:x}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size), thread_id) + else: + P_request = "read packet: $P{:x}={}#00".format(reg_index, pack_register_hex(endian, flipped_bits_int, byte_size=reg_byte_size)) + + # Write the flipped value to the register. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + P_request, + { "direction":"send", "regex":r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", "capture":{1:"P_response"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail + # (e.g. flags, segment selectors, etc.) due to register value restrictions. 
Don't worry about them + # all flipping perfectly. + P_response = context.get("P_response") + self.assertIsNotNone(P_response) + if P_response == "OK": + successful_writes += 1 + else: + failed_writes += 1 + # print "reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response) + + # Read back the register value, ensure it matches the flipped value. + if P_response == "OK": + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + verify_p_response_raw = context.get("p_response") + self.assertIsNotNone(verify_p_response_raw) + verify_bits = unpack_register_hex_unsigned(endian, verify_p_response_raw) + + if verify_bits != flipped_bits_int: + # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts. + # print "reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits) + successful_writes -= 1 + failed_writes +=1 + + return (successful_writes, failed_writes) + + def is_bit_flippable_register(self, reg_info): + if not reg_info: + return False + if not "set" in reg_info: + return False + if reg_info["set"] != "General Purpose Registers": + return False + if ("container-regs" in reg_info) and (len(reg_info["container-regs"]) > 0): + # Don't try to bit flip registers contained in another register. + return False + if re.match("^.s$", reg_info["name"]): + # This is a 2-letter register name that ends in "s", like a segment register. + # Don't try to bit flip these. + return False + # Okay, this looks fine-enough. 
+ return True + + def read_register_values(self, reg_infos, endian, thread_id=None): + self.assertIsNotNone(reg_infos) + values = {} + + for reg_info in reg_infos: + # We append a register index when load reg infos so we can work with subsets. + reg_index = reg_info.get("lldb_register_index") + self.assertIsNotNone(reg_index) + + # Handle thread suffix. + if thread_id: + p_request = "read packet: $p{:x};thread:{:x}#00".format(reg_index, thread_id) + else: + p_request = "read packet: $p{:x}#00".format(reg_index) + + # Read it with p. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + { "direction":"send", "regex":r"^\$([0-9a-fA-F]+)#", "capture":{1:"p_response"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Convert value from target endian to integral. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + self.assertTrue(len(p_response) > 0) + self.assertFalse(p_response[0] == "E") + + values[reg_index] = unpack_register_hex_unsigned(endian, p_response) + + return values + + def add_vCont_query_packets(self): + self.test_sequence.add_log_lines([ + "read packet: $vCont?#49", + {"direction":"send", "regex":r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", "capture":{2:"vCont_query_response" } }, + ], True) + + def parse_vCont_query_response(self, context): + self.assertIsNotNone(context) + vCont_query_response = context.get("vCont_query_response") + + # Handle case of no vCont support at all - in which case the capture group will be none or zero length. 
+ if not vCont_query_response or len(vCont_query_response) == 0: + return {} + + return {key:1 for key in vCont_query_response.split(";") if key and len(key) > 0} + + def count_single_steps_until_true(self, thread_id, predicate, args, max_step_count=100, use_Hc_packet=True, step_instruction="s"): + """Used by single step test that appears in a few different contexts.""" + single_step_count = 0 + + while single_step_count < max_step_count: + self.assertIsNotNone(thread_id) + + # Build the packet for the single step instruction. We replace {thread}, if present, with the thread_id. + step_packet = "read packet: ${}#00".format(re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction)) + # print "\nstep_packet created: {}\n".format(step_packet) + + # Single step. + self.reset_test_sequence() + if use_Hc_packet: + self.test_sequence.add_log_lines( + [# Set the continue thread. + "read packet: $Hc{0:x}#00".format(thread_id), + "send packet: $OK#00", + ], True) + self.test_sequence.add_log_lines([ + # Single step. + step_packet, + # "read packet: $vCont;s:{0:x}#00".format(thread_id), + # Expect a breakpoint stop report. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + self.assertIsNotNone(context.get("stop_signo")) + self.assertEquals(int(context.get("stop_signo"), 16), signal.SIGTRAP) + + single_step_count += 1 + + # See if the predicate is true. If so, we're done. + if predicate(args): + return (True, single_step_count) + + # The predicate didn't return true within the runaway step count. 
# Note we require launch-only testing so we can get inferior output.
+ { "type":"output_match", "regex":r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$", + "capture":{ 1:"function_address", 2:"g_c1_address", 3:"g_c2_address"} }, + # Now stop the inferior. + "read packet: {}".format(chr(03)), + # And wait for the stop notification. + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the main thread id. + self.assertIsNotNone(context.get("stop_thread_id")) + main_thread_id = int(context.get("stop_thread_id"), 16) + + # Grab the function address. + self.assertIsNotNone(context.get("function_address")) + function_address = int(context.get("function_address"), 16) + + # Grab the data addresses. + self.assertIsNotNone(context.get("g_c1_address")) + g_c1_address = int(context.get("g_c1_address"), 16) + + self.assertIsNotNone(context.get("g_c2_address")) + g_c2_address = int(context.get("g_c2_address"), 16) + + # Set a breakpoint at the given address. + # Note this might need to be switched per platform (ARM, mips, etc.). + BREAKPOINT_KIND = 1 + self.reset_test_sequence() + self.add_set_breakpoint_packets(function_address, do_continue=True, breakpoint_kind=BREAKPOINT_KIND) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Remove the breakpoint. + self.reset_test_sequence() + self.add_remove_breakpoint_packets(function_address, breakpoint_kind=BREAKPOINT_KIND) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify g_c1 and g_c2 match expected initial state. 
+ args = {} + args["g_c1_address"] = g_c1_address + args["g_c2_address"] = g_c2_address + args["expected_g_c1"] = "0" + args["expected_g_c2"] = "1" + + self.assertTrue(self.g_c1_c2_contents_are(args)) + + # Verify we take only a small number of steps to hit the first state. Might need to work through function entry prologue code. + args["expected_g_c1"] = "1" + args["expected_g_c2"] = "1" + (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=25, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) + self.assertTrue(state_reached) + + # Verify we hit the next state. + args["expected_g_c1"] = "1" + args["expected_g_c2"] = "0" + (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) + self.assertTrue(state_reached) + self.assertEquals(step_count, 1) + + # Verify we hit the next state. + args["expected_g_c1"] = "0" + args["expected_g_c2"] = "0" + (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) + self.assertTrue(state_reached) + self.assertEquals(step_count, 1) + + # Verify we hit the next state. 
+ args["expected_g_c1"] = "0" + args["expected_g_c2"] = "1" + (state_reached, step_count) = self.count_single_steps_until_true(main_thread_id, self.g_c1_c2_contents_are, args, max_step_count=5, use_Hc_packet=use_Hc_packet, step_instruction=step_instruction) + self.assertTrue(state_reached) + self.assertEquals(step_count, 1) + Index: test/tools/lldb-server/inferior-crash/Makefile =================================================================== --- /dev/null +++ test/tools/lldb-server/inferior-crash/Makefile @@ -0,0 +1,8 @@ +LEVEL = ../../../make + +CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS -std=c++11 +# LD_EXTRAS := -lpthread +CXX_SOURCES := main.cpp +MAKE_DSYM :=NO + +include $(LEVEL)/Makefile.rules Index: test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py =================================================================== --- /dev/null +++ test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py @@ -0,0 +1,45 @@ +import unittest2 + +# Add the directory above ours to the python library path since we +# will import from there. 
+import os.path +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +import gdbremote_testcase +import signal +from lldbtest import * + +class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + def inferior_abort_received(self): + procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"]) + self.assertIsNotNone(procs) + + self.test_sequence.add_log_lines([ + "read packet: $vCont;c#a8", + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", "capture":{ 1:"hex_exit_code"} }, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + hex_exit_code = context.get("hex_exit_code") + self.assertIsNotNone(hex_exit_code) + self.assertEquals(int(hex_exit_code, 16), signal.SIGABRT) + + @debugserver_test + @dsym_test + def test_inferior_abort_received_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.inferior_abort_received() + + @llgs_test + @dwarf_test + def test_inferior_abort_received_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.inferior_abort_received() + Index: test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py =================================================================== --- /dev/null +++ test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py @@ -0,0 +1,46 @@ +import unittest2 + +# Add the directory above ours to the python library path since we +# will import from there. 
+import os.path +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +import gdbremote_testcase +import signal +from lldbtest import * + +class TestGdbRemoteSegFault(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + GDB_REMOTE_STOP_CODE_BAD_ACCESS = 0x91 + + def inferior_seg_fault_received(self, expected_signo): + procs = self.prep_debug_monitor_and_inferior(inferior_args=["segfault"]) + self.assertIsNotNone(procs) + + self.test_sequence.add_log_lines([ + "read packet: $vCont;c#a8", + {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", "capture":{ 1:"hex_exit_code"} }, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + hex_exit_code = context.get("hex_exit_code") + self.assertIsNotNone(hex_exit_code) + self.assertEquals(int(hex_exit_code, 16), expected_signo) + + @debugserver_test + @dsym_test + def test_inferior_seg_fault_received_debugserver_dsym(self): + self.init_debugserver_test() + self.buildDsym() + self.inferior_seg_fault_received(self.GDB_REMOTE_STOP_CODE_BAD_ACCESS) + + @llgs_test + @dwarf_test + def test_inferior_seg_fault_received_llgs_dwarf(self): + self.init_llgs_test() + self.buildDwarf() + self.inferior_seg_fault_received(signal.SIGSEGV) Index: test/tools/lldb-server/inferior-crash/main.cpp =================================================================== --- /dev/null +++ test/tools/lldb-server/inferior-crash/main.cpp @@ -0,0 +1,39 @@ +#include +#include +#include + +namespace +{ + const char *const SEGFAULT_COMMAND = "segfault"; + const char *const ABORT_COMMAND = "abort"; +} + +int main (int argc, char **argv) +{ + if (argc < 2) + { + std::cout << "expected at least one command provided on the command line" << std::endl; + } + + // Process command line args. + for (int i = 1; i < argc; ++i) + { + const char *const command = argv[i]; + if (std::strstr (command, SEGFAULT_COMMAND)) + { + // Perform a null pointer access. 
"""Return the lldb-server exe path.
+ + Returns: + A path to the lldb-gdbserver exe if it is found to exist; otherwise, + returns None. + """ + if "LLDB_DEBUGSERVER_PATH" in os.environ: + return os.environ["LLDB_DEBUGSERVER_PATH"] + elif "LLDB_EXEC" in os.environ: + lldb_exe = os.environ["LLDB_EXEC"] + if not lldb_exe: + return None + else: + return _get_debug_monitor_from_lldb(lldb_exe, "lldb-server") + else: + return None + +def get_debugserver_exe(): + """Return the debugserver exe path. + + Returns: + A path to the debugserver exe if it is found to exist; otherwise, + returns None. + """ + if "LLDB_DEBUGSERVER_PATH" in os.environ: + return os.environ["LLDB_DEBUGSERVER_PATH"] + elif "LLDB_EXEC" in os.environ: + lldb_exe = os.environ["LLDB_EXEC"] + if not lldb_exe: + return None + else: + return _get_debug_monitor_from_lldb(lldb_exe, "debugserver") + else: + return None + + +_LOG_LINE_REGEX = re.compile(r'^(lldb-gdbserver|debugserver)\s+<\s*(\d+)>' + + '\s+(read|send)\s+packet:\s+(.+)$') + + +def _is_packet_lldb_gdbserver_input(packet_type, llgs_input_is_read): + """Return whether a given packet is input for lldb-gdbserver. + + Args: + packet_type: a string indicating 'send' or 'receive', from a + gdbremote packet protocol log. + + llgs_input_is_read: true if lldb-gdbserver input (content sent to + lldb-gdbserver) is listed as 'read' or 'send' in the packet + log entry. + + Returns: + True if the packet should be considered input for lldb-gdbserver; False + otherwise. + """ + if packet_type == 'read': + # when llgs is the read side, then a read packet is meant for + # input to llgs (when captured from the llgs/debugserver exe). + return llgs_input_is_read + elif packet_type == 'send': + # when llgs is the send side, then a send packet is meant to + # be input to llgs (when captured from the lldb exe). 
+ return not llgs_input_is_read + else: + # don't understand what type of packet this is + raise "Unknown packet type: {}".format(packet_type) + + +def handle_O_packet(context, packet_contents, logger): + """Handle O packets.""" + if (not packet_contents) or (len(packet_contents) < 1): + return False + elif packet_contents[0] != "O": + return False + elif packet_contents == "OK": + return False + + new_text = gdbremote_hex_decode_string(packet_contents[1:]) + context["O_content"] += new_text + context["O_count"] += 1 + + if logger: + logger.debug("text: new \"{}\", cumulative: \"{}\"".format(new_text, context["O_content"])) + + return True + +_STRIP_CHECKSUM_REGEX = re.compile(r'#[0-9a-fA-F]{2}$') +_STRIP_COMMAND_PREFIX_REGEX = re.compile(r"^\$") +_STRIP_COMMAND_PREFIX_M_REGEX = re.compile(r"^\$m") + + +def assert_packets_equal(asserter, actual_packet, expected_packet): + # strip off the checksum digits of the packet. When we're in + # no-ack mode, the # checksum is ignored, and should not be cause + # for a mismatched packet. + actual_stripped = _STRIP_CHECKSUM_REGEX.sub('', actual_packet) + expected_stripped = _STRIP_CHECKSUM_REGEX.sub('', expected_packet) + asserter.assertEqual(actual_stripped, expected_stripped) + +def expect_lldb_gdbserver_replay( + asserter, + sock, + test_sequence, + timeout_seconds, + logger=None): + """Replay socket communication with lldb-gdbserver and verify responses. + + Args: + asserter: the object providing assertEqual(first, second, msg=None), e.g. TestCase instance. + + sock: the TCP socket connected to the lldb-gdbserver exe. + + test_sequence: a GdbRemoteTestSequence instance that describes + the messages sent to the gdb remote and the responses + expected from it. + + timeout_seconds: any response taking more than this number of + seconds will cause an exception to be raised. + + logger: a Python logger instance. + + Returns: + The context dictionary from running the given gdbremote + protocol sequence. 
This will contain any of the capture + elements specified to any GdbRemoteEntry instances in + test_sequence. + + The context will also contain an entry, context["O_content"] + which contains the text from the inferior received via $O + packets. $O packets should not attempt to be matched + directly since they are not entirely deterministic as to + how many arrive and how much text is in each one. + + context["O_count"] will contain an integer of the number of + O packets received. + """ + + # Ensure we have some work to do. + if len(test_sequence.entries) < 1: + return {} + + context = {"O_count":0, "O_content":""} + with socket_packet_pump.SocketPacketPump(sock, logger) as pump: + # Grab the first sequence entry. + sequence_entry = test_sequence.entries.pop(0) + + # While we have an active sequence entry, send messages + # destined for the stub and collect/match/process responses + # expected from the stub. + while sequence_entry: + if sequence_entry.is_send_to_remote(): + # This is an entry to send to the remote debug monitor. + send_packet = sequence_entry.get_send_packet() + if logger: + if len(send_packet) == 1 and send_packet[0] == chr(3): + packet_desc = "^C" + else: + packet_desc = send_packet + logger.info("sending packet to remote: {}".format(packet_desc)) + sock.sendall(send_packet) + else: + # This is an entry expecting to receive content from the remote debug monitor. + + # We'll pull from (and wait on) the queue appropriate for the type of matcher. + # We keep separate queues for process output (coming from non-deterministic + # $O packet division) and for all other packets. + if sequence_entry.is_output_matcher(): + try: + # Grab next entry from the output queue. 
def gdbremote_packet_encode_string(payload):
    """Wrap a payload in gdb-remote packet framing: $<payload>#<checksum>.

    Args:
        payload: the packet body text to frame.

    Returns:
        The framed packet string; the two trailing hex digits are the
        modulo-256 sum of the payload's character values, per the gdb
        remote serial protocol.
    """
    # Parameter renamed from "str" (which shadowed the builtin); all
    # callers pass it positionally, so this is backward-compatible.
    checksum = 0
    for c in payload:
        checksum += ord(c)
    return '$' + payload + '#{0:02x}'.format(checksum % 256)
+ """ + payload = "A" + + # build the arg content + arg_index = 0 + for arg in args_list: + # Comma-separate the args. + if arg_index > 0: + payload += ',' + + # Hex-encode the arg. + hex_arg = gdbremote_hex_encode_string(arg) + + # Build the A entry. + payload += "{},{},{}".format(len(hex_arg), arg_index, hex_arg) + + # Next arg index, please. + arg_index += 1 + + # return the packetized payload + return gdbremote_packet_encode_string(payload) + + +def parse_reg_info_response(response_packet): + if not response_packet: + raise Exception("response_packet cannot be None") + + # Strip off prefix $ and suffix #xx if present. + response_packet = _STRIP_COMMAND_PREFIX_REGEX.sub("", response_packet) + response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) + + # Build keyval pairs + values = {} + for kv in response_packet.split(";"): + if len(kv) < 1: + continue + (key, val) = kv.split(':') + values[key] = val + + return values + + +def parse_threadinfo_response(response_packet): + if not response_packet: + raise Exception("response_packet cannot be None") + + # Strip off prefix $ and suffix #xx if present. + response_packet = _STRIP_COMMAND_PREFIX_M_REGEX.sub("", response_packet) + response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) + + # Return list of thread ids + return [int(thread_id_hex,16) for thread_id_hex in response_packet.split(",") if len(thread_id_hex) > 0] + +def unpack_endian_binary_string(endian, value_string): + """Unpack a gdb-remote binary (post-unescaped, i.e. 
def unpack_register_hex_unsigned(endian, value_string):
    """Decode a gdb-remote $p-style hex register value into an unsigned int.

    endian is the inferior's byte order ('little' or 'big'); value_string
    is the raw hex text from the stub.
    """
    if not endian:
        raise Exception("endian cannot be None")
    if not value_string or len(value_string) < 1:
        raise Exception("value_string cannot be None or empty")

    if endian == 'big':
        # Big-endian hex text is already most-significant-digit first.
        return int(value_string, 16)
    elif endian == 'little':
        # Little-endian: each successive hex byte pair is 8 bits more
        # significant than the previous one.
        value = 0
        for shift, offset in enumerate(range(0, len(value_string), 2)):
            value += int(value_string[offset:offset + 2], 16) << (8 * shift)
        return value
    else:
        # pdp is valid but need to add parse code once needed.
        raise Exception("unsupported endian:{}".format(endian))
+ retval += "00" * (byte_size - len(retval)/2) + return retval + + elif endian == 'big': + retval = value.encode("hex") + if byte_size: + # Add zero-fill to the left/front (MSB side) of the value. + retval = ("00" * (byte_size - len(retval)/2)) + retval + return retval + + else: + # pdp is valid but need to add parse code once needed. + raise Exception("unsupported endian:{}".format(endian)) + +class GdbRemoteEntryBase(object): + def is_output_matcher(self): + return False + +class GdbRemoteEntry(GdbRemoteEntryBase): + + def __init__(self, is_send_to_remote=True, exact_payload=None, regex=None, capture=None, expect_captures=None): + """Create an entry representing one piece of the I/O to/from a gdb remote debug monitor. + + Args: + + is_send_to_remote: True if this entry is a message to be + sent to the gdbremote debug monitor; False if this + entry represents text to be matched against the reply + from the gdbremote debug monitor. + + exact_payload: if not None, then this packet is an exact + send (when sending to the remote) or an exact match of + the response from the gdbremote. The checksums are + ignored on exact match requests since negotiation of + no-ack makes the checksum content essentially + undefined. + + regex: currently only valid for receives from gdbremote. + When specified (and only if exact_payload is None), + indicates the gdbremote response must match the given + regex. Match groups in the regex can be used for two + different purposes: saving the match (see capture + arg), or validating that a match group matches a + previously established value (see expect_captures). It + is perfectly valid to have just a regex arg and to + specify neither capture or expect_captures args. This + arg only makes sense if exact_payload is not + specified. + + capture: if specified, is a dictionary of regex match + group indices (should start with 1) to variable names + that will store the capture group indicated by the + index. 
For example, {1:"thread_id"} will store capture + group 1's content in the context dictionary where + "thread_id" is the key and the match group value is + the value. The value stored off can be used later in a + expect_captures expression. This arg only makes sense + when regex is specified. + + expect_captures: if specified, is a dictionary of regex + match group indices (should start with 1) to variable + names, where the match group should match the value + existing in the context at the given variable name. + For example, {2:"thread_id"} indicates that the second + match group must match the value stored under the + context's previously stored "thread_id" key. This arg + only makes sense when regex is specified. + """ + self._is_send_to_remote = is_send_to_remote + self.exact_payload = exact_payload + self.regex = regex + self.capture = capture + self.expect_captures = expect_captures + + def is_send_to_remote(self): + return self._is_send_to_remote + + def is_consumed(self): + # For now, all packets are consumed after first use. + return True + + def get_send_packet(self): + if not self.is_send_to_remote(): + raise Exception("get_send_packet() called on GdbRemoteEntry that is not a send-to-remote packet") + if not self.exact_payload: + raise Exception("get_send_packet() called on GdbRemoteEntry but it doesn't have an exact payload") + return self.exact_payload + + def _assert_exact_payload_match(self, asserter, actual_packet): + assert_packets_equal(asserter, actual_packet, self.exact_payload) + return None + + def _assert_regex_match(self, asserter, actual_packet, context): + # Ensure the actual packet matches from the start of the actual packet. + match = self.regex.match(actual_packet) + if not match: + asserter.fail("regex '{}' failed to match against content '{}'".format(self.regex.pattern, actual_packet)) + + if self.capture: + # Handle captures. 
    def assert_match(self, asserter, actual_packet, context=None):
        """Match actual_packet against this entry; return the (possibly updated) context.

        Exact-payload entries are compared with the trailing checksum
        ignored; regex entries may store captured groups into the context
        and/or validate groups against previously captured values.
        """
        # This only makes sense for matching lines coming from the
        # remote debug monitor.
        if self.is_send_to_remote():
            raise Exception("Attempted to match a packet being sent to the remote debug monitor, doesn't make sense.")

        # Create a new context if needed.
        if not context:
            context = {}

        # If this is an exact payload, ensure they match exactly,
        # ignoring the packet checksum which is optional for no-ack
        # mode.
        if self.exact_payload:
            self._assert_exact_payload_match(asserter, actual_packet)
            return context
        elif self.regex:
            return self._assert_regex_match(asserter, actual_packet, context)
        else:
            raise Exception("Don't know how to match a remote-sent packet when exact_payload isn't specified.")
+ If there is a special first packet to start the iteration query, see the + first_query key. + + first_query: optional. If the first query requires a special query command, specify + it with this key. Do not specify the $ prefix or #xx suffix. + + append_iteration_suffix: defaults to False. Specify True if the 0-based iteration + index should be appended as a suffix to the command. e.g. qRegisterInfo with + this key set true will generate query packets of qRegisterInfo0, qRegisterInfo1, + etc. + + end_regex: required. Specifies a compiled regex object that will match the full text + of any response that signals an end to the iteration. It must include the + initial $ and ending #xx and must match the whole packet. + + save_key: required. Specifies the key within the context where an array will be stored. + Each packet received from the gdb remote that does not match the end_regex will get + appended to the array stored within the context at that key. + + runaway_response_count: optional. Defaults to 10000. If this many responses are retrieved, + assume there is something wrong with either the response collection or the ending + detection regex and throw an exception. 
+ """ + def __init__(self, params): + self._next_query = params.get("next_query", params.get("query")) + if not self._next_query: + raise "either next_query or query key must be specified for MultiResponseGdbRemoteEntry" + + self._first_query = params.get("first_query", self._next_query) + self._append_iteration_suffix = params.get("append_iteration_suffix", False) + self._iteration = 0 + self._end_regex = params["end_regex"] + self._save_key = params["save_key"] + self._runaway_response_count = params.get("runaway_response_count", 10000) + self._is_send_to_remote = True + self._end_matched = False + + def is_send_to_remote(self): + return self._is_send_to_remote + + def get_send_packet(self): + if not self.is_send_to_remote(): + raise Exception("get_send_packet() called on MultiResponseGdbRemoteEntry that is not in the send state") + if self._end_matched: + raise Exception("get_send_packet() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.") + + # Choose the first or next query for the base payload. + if self._iteration == 0 and self._first_query: + payload = self._first_query + else: + payload = self._next_query + + # Append the suffix as needed. + if self._append_iteration_suffix: + payload += "%x" % self._iteration + + # Keep track of the iteration. + self._iteration += 1 + + # Now that we've given the query packet, flip the mode to receive/match. + self._is_send_to_remote = False + + # Return the result, converted to packet form. + return gdbremote_packet_encode_string(payload) + + def is_consumed(self): + return self._end_matched + + def assert_match(self, asserter, actual_packet, context=None): + # This only makes sense for matching lines coming from the remote debug monitor. 
    def assert_match(self, asserter, actual_packet, context=None):
        """Record one query response, or finish when the end regex matches.

        Appends actual_packet to context[save_key] unless it matches
        end_regex (which terminates the iteration), then flips this entry
        back into send mode so the next query packet can be generated.
        """
        # This only makes sense for matching lines coming from the remote debug monitor.
        if self.is_send_to_remote():
            raise Exception("assert_match() called on MultiResponseGdbRemoteEntry but state is set to send a query packet.")

        if self._end_matched:
            raise Exception("assert_match() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.")

        # Set up a context as needed.
        if not context:
            context = {}

        # Check if the packet matches the end condition.
        match = self._end_regex.match(actual_packet)
        if match:
            # We're done iterating.
            self._end_matched = True
            return context

        # Not done iterating - save the packet.
        context[self._save_key] = context.get(self._save_key, [])
        context[self._save_key].append(actual_packet)

        # Check for a runaway response cycle.
        if len(context[self._save_key]) >= self._runaway_response_count:
            raise Exception("runaway query/response cycle detected: %d responses captured so far. Last response: %s" %
                (len(context[self._save_key]), context[self._save_key][-1]))

        # Flip the mode to send for generating the query.
        self._is_send_to_remote = True
        return context
    def __init__(self, regex=None, regex_mode="match", capture=None):
        """Create an output matcher.

        Args:
            regex: required compiled regex the accumulated $O output must
                satisfy.
            regex_mode: "match" (regex must match from the start of the
                accumulated output) or "search" (may match anywhere in it).
            capture: optional dict mapping regex group indices (1-based)
                to context variable names in which captured text is stored.

        Raises:
            Exception: if regex is missing or regex_mode is unsupported.
        """
        self._regex = regex
        self._regex_mode = regex_mode
        self._capture = capture
        # Becomes True once the regex has matched; entry is then consumed.
        self._matched = False

        if not self._regex:
            raise Exception("regex cannot be None")

        if not self._regex_mode in ["match", "search"]:
            raise Exception("unsupported regex mode \"{}\": must be \"match\" or \"search\"".format(self._regex_mode))
+ if len(accumulated_output) < 1: + return context + + # Check if we match + if self._regex_mode == "match": + match = self._regex.match(accumulated_output) + elif self._regex_mode == "search": + match = self._regex.search(accumulated_output) + else: + raise Exception("Unexpected regex mode: {}".format(self._regex_mode)) + + # If we don't match, wait to try again after next $O content, or time out. + if not match: + # print "re pattern \"{}\" did not match against \"{}\"".format(self._regex.pattern, accumulated_output) + return context + + # We do match. + self._matched = True + # print "re pattern \"{}\" matched against \"{}\"".format(self._regex.pattern, accumulated_output) + + # Collect up any captures into the context. + if self._capture: + # Handle captures. + for group_index, var_name in self._capture.items(): + capture_text = match.group(group_index) + if not capture_text: + raise Exception("No content for group index {}".format(group_index)) + context[var_name] = capture_text + + return context + + +class GdbRemoteTestSequence(object): + + _LOG_LINE_REGEX = re.compile(r'^.*(read|send)\s+packet:\s+(.+)$') + + def __init__(self, logger): + self.entries = [] + self.logger = logger + + def add_log_lines(self, log_lines, remote_input_is_read): + for line in log_lines: + if type(line) == str: + # Handle log line import + # if self.logger: + # self.logger.debug("processing log line: {}".format(line)) + match = self._LOG_LINE_REGEX.match(line) + if match: + playback_packet = match.group(2) + direction = match.group(1) + if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read): + # Handle as something to send to the remote debug monitor. + # if self.logger: + # self.logger.info("processed packet to send to remote: {}".format(playback_packet)) + self.entries.append(GdbRemoteEntry(is_send_to_remote=True, exact_payload=playback_packet)) + else: + # Log line represents content to be expected from the remote debug monitor. 
    def add_log_lines(self, log_lines, remote_input_is_read):
        """Append playback entries parsed from packet-log lines.

        Each element of log_lines is either a string in gdb-remote packet
        log format ("read packet: ..." / "send packet: ...") or a dict
        describing a "regex_capture" (default), "multi_response", or
        "output_match" entry.  remote_input_is_read tells the direction
        helper whether "read" lines are input to the remote stub.
        """
        for line in log_lines:
            if type(line) == str:
                # Handle log line import
                # if self.logger:
                #     self.logger.debug("processing log line: {}".format(line))
                match = self._LOG_LINE_REGEX.match(line)
                if match:
                    playback_packet = match.group(2)
                    direction = match.group(1)
                    if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read):
                        # Handle as something to send to the remote debug monitor.
                        # if self.logger:
                        #     self.logger.info("processed packet to send to remote: {}".format(playback_packet))
                        self.entries.append(GdbRemoteEntry(is_send_to_remote=True, exact_payload=playback_packet))
                    else:
                        # Log line represents content to be expected from the remote debug monitor.
                        # if self.logger:
                        #     self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet))
                        self.entries.append(GdbRemoteEntry(is_send_to_remote=False,exact_payload=playback_packet))
                else:
                    raise Exception("failed to interpret log line: {}".format(line))
            elif type(line) == dict:
                entry_type = line.get("type", "regex_capture")
                if entry_type == "regex_capture":
                    # Handle more explicit control over details via dictionary.
                    direction = line.get("direction", None)
                    regex = line.get("regex", None)
                    capture = line.get("capture", None)
                    expect_captures = line.get("expect_captures", None)

                    # Compile the regex.
                    if regex and (type(regex) == str):
                        regex = re.compile(regex)

                    if _is_packet_lldb_gdbserver_input(direction, remote_input_is_read):
                        # Handle as something to send to the remote debug monitor.
                        # if self.logger:
                        #     self.logger.info("processed dict sequence to send to remote")
                        self.entries.append(GdbRemoteEntry(is_send_to_remote=True, regex=regex, capture=capture, expect_captures=expect_captures))
                    else:
                        # Log line represents content to be expected from the remote debug monitor.
                        # if self.logger:
                        #     self.logger.info("processed dict sequence to match receiving from remote")
                        self.entries.append(GdbRemoteEntry(is_send_to_remote=False, regex=regex, capture=capture, expect_captures=expect_captures))
                elif entry_type == "multi_response":
                    self.entries.append(MultiResponseGdbRemoteEntry(line))
                elif entry_type == "output_match":

                    regex = line.get("regex", None)
                    # Compile the regex.
                    if regex and (type(regex) == str):
                        regex = re.compile(regex)

                    regex_mode = line.get("regex_mode", "match")
                    capture = line.get("capture", None)
                    self.entries.append(MatchRemoteOutputEntry(regex=regex, regex_mode=regex_mode, capture=capture))
                else:
                    raise Exception("unknown entry type \"%s\"" % entry_type)
def process_is_running(pid, unknown_value=True):
    """If possible, validate that the given pid represents a running process on the local system.

    Args:

        pid: an OS-specific representation of a process id.  Should be an
            integral value.

        unknown_value: value used when we cannot determine how to check
            running local processes on the OS.

    Returns:

        If we can figure out how to check running process ids on the given
        OS: return True if the process is running, or False otherwise.

        If we don't know how to check running process ids on the given OS:
        return the value provided by the unknown_value arg.
    """
    # isinstance is the idiomatic type check (type(pid) != int is an
    # anti-pattern and also rejects int subclasses unnecessarily).
    if not isinstance(pid, int):
        raise Exception("pid must be of type int")

    process_ids = []

    if platform.system() in ['Darwin', 'Linux', 'FreeBSD', 'NetBSD']:
        # Build the list of running process ids.  universal_newlines makes
        # check_output return text (not bytes) on both Python 2.7 and 3,
        # so the split('\n') below works on either.
        output = subprocess.check_output("ps ax | awk '{ print $1; }'", shell=True, universal_newlines=True)
        # Skip the "PID" header line, then convert text pids to ints.
        text_process_ids = output.split('\n')[1:]
        process_ids = [int(text_pid) for text_pid in text_process_ids if text_pid != '']
    # elif {your_platform_here}:
    #   fill in process_ids as a list of int type process IDs running on
    #   the local system.
    else:
        # Don't know how to get list of running process IDs on this
        # OS, so return the "don't know" value.
        return unknown_value

    # Check if the pid is in the process_ids
    return pid in process_ids
// Print the current thread's id (the TID debugserver/lldb-gdbserver would
// report), without a trailing newline, in lowercase hex.
static void
print_thread_id ()
{
    // Put in the right magic here for your platform to spit out the thread id (tid) that debugserver/lldb-gdbserver would see as a TID.
    // Otherwise, let the else clause print out the unsupported text so that the unit test knows to skip verifying thread ids.
#if defined(__APPLE__)
    __uint64_t tid = 0;
    pthread_threadid_np(pthread_self(), &tid);
    printf ("%" PRIx64, tid);
#elif defined (__linux__)
    // This is a call to gettid() via syscall.
    // NOTE(review): the static_cast template argument appears to have been
    // lost in extraction (presumably <uint64_t> to match PRIx64) — confirm
    // against the original source.
    printf ("%" PRIx64, static_cast (syscall (__NR_gettid)));
#else
    printf("{no-tid-support}");
#endif
}
// Print a fixed greeting.  Holds the global print mutex so output from
// concurrently running threads does not interleave; also used as a known
// code address / callable target by the get-code-address-hex: and
// call-function: inferior commands.
static void
hello ()
{
    pthread_mutex_lock (&g_print_mutex);
    printf ("hello, world\n");
    pthread_mutex_unlock (&g_print_mutex);
}
+ g_is_segfaulting = false; + } + pthread_mutex_unlock (&g_jump_buffer_mutex); + + pthread_mutex_lock (&g_print_mutex); + printf ("thread "); + print_thread_id (); + printf (": past SIGSEGV\n"); + pthread_mutex_unlock (&g_print_mutex); + } + + int sleep_seconds_remaining = 5; + while (sleep_seconds_remaining > 0) + { + sleep_seconds_remaining = sleep (sleep_seconds_remaining); + } + + return nullptr; +} + +int main (int argc, char **argv) +{ +#if defined(__linux__) + // Immediately enable any ptracer so that we can allow the stub attach + // operation to succeed. Some Linux kernels are locked down so that + // only an ancestor can be a ptracer of a process. This disables that + // restriction. Without it, attach-related stub tests will fail. +#if defined(PR_SET_PTRACER) && defined(PR_SET_PTRACER_ANY) + const int prctl_result = prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0); + static_cast (prctl_result); +#endif +#endif + + std::vector threads; + std::unique_ptr heap_array_up; + int return_value = 0; + + // Set the signal handler. + sig_t sig_result = signal (SIGALRM, signal_handler); + if (sig_result == SIG_ERR) + { + fprintf(stderr, "failed to set SIGALRM signal handler: errno=%d\n", errno); + exit (1); + } + + sig_result = signal (SIGUSR1, signal_handler); + if (sig_result == SIG_ERR) + { + fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno); + exit (1); + } + + sig_result = signal (SIGSEGV, signal_handler); + if (sig_result == SIG_ERR) + { + fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno); + exit (1); + } + + // Process command line args. + for (int i = 1; i < argc; ++i) + { + if (std::strstr (argv[i], STDERR_PREFIX)) + { + // Treat remainder as text to go to stderr. + fprintf (stderr, "%s\n", (argv[i] + strlen (STDERR_PREFIX))); + } + else if (std::strstr (argv[i], RETVAL_PREFIX)) + { + // Treat as the return value for the program. 
+ return_value = std::atoi (argv[i] + strlen (RETVAL_PREFIX)); + } + else if (std::strstr (argv[i], SLEEP_PREFIX)) + { + // Treat as the amount of time to have this process sleep (in seconds). + int sleep_seconds_remaining = std::atoi (argv[i] + strlen (SLEEP_PREFIX)); + + // Loop around, sleeping until all sleep time is used up. Note that + // signals will cause sleep to end early with the number of seconds remaining. + for (int i = 0; sleep_seconds_remaining > 0; ++i) + { + sleep_seconds_remaining = sleep (sleep_seconds_remaining); + // std::cout << "sleep result (call " << i << "): " << sleep_seconds_remaining << std::endl; + } + } + else if (std::strstr (argv[i], SET_MESSAGE_PREFIX)) + { + // Copy the contents after "set-message:" to the g_message buffer. + // Used for reading inferior memory and verifying contents match expectations. + strncpy (g_message, argv[i] + strlen (SET_MESSAGE_PREFIX), sizeof (g_message)); + + // Ensure we're null terminated. + g_message[sizeof (g_message) - 1] = '\0'; + + } + else if (std::strstr (argv[i], PRINT_MESSAGE_COMMAND)) + { + pthread_mutex_lock (&g_print_mutex); + printf ("message: %s\n", g_message); + pthread_mutex_unlock (&g_print_mutex); + } + else if (std::strstr (argv[i], GET_DATA_ADDRESS_PREFIX)) + { + volatile void *data_p = nullptr; + + if (std::strstr (argv[i] + strlen (GET_DATA_ADDRESS_PREFIX), "g_message")) + data_p = &g_message[0]; + else if (std::strstr (argv[i] + strlen (GET_DATA_ADDRESS_PREFIX), "g_c1")) + data_p = &g_c1; + else if (std::strstr (argv[i] + strlen (GET_DATA_ADDRESS_PREFIX), "g_c2")) + data_p = &g_c2; + + pthread_mutex_lock (&g_print_mutex); + printf ("data address: %p\n", data_p); + pthread_mutex_unlock (&g_print_mutex); + } + else if (std::strstr (argv[i], GET_HEAP_ADDRESS_COMMAND)) + { + // Create a byte array if not already present. 
+            if (!heap_array_up)
+                heap_array_up.reset (new uint8_t[32]);
+
+            pthread_mutex_lock (&g_print_mutex);
+            printf ("heap address: %p\n", heap_array_up.get ());
+            pthread_mutex_unlock (&g_print_mutex);
+        }
+        else if (std::strstr (argv[i], GET_STACK_ADDRESS_COMMAND))
+        {
+            pthread_mutex_lock (&g_print_mutex);
+            printf ("stack address: %p\n", &return_value);
+            pthread_mutex_unlock (&g_print_mutex);
+        }
+        else if (std::strstr (argv[i], GET_CODE_ADDRESS_PREFIX))
+        {
+            void (*func_p)() = nullptr;
+
+            if (std::strstr (argv[i] + strlen (GET_CODE_ADDRESS_PREFIX), "hello"))
+                func_p = hello;
+            else if (std::strstr (argv[i] + strlen (GET_CODE_ADDRESS_PREFIX), "swap_chars"))
+                func_p = swap_chars;
+
+            pthread_mutex_lock (&g_print_mutex);
+            printf ("code address: %p\n", func_p);
+            pthread_mutex_unlock (&g_print_mutex);
+        }
+        else if (std::strstr (argv[i], CALL_FUNCTION_PREFIX))
+        {
+            // Default to providing the address of main.
+            if (std::strcmp (argv[i] + strlen (CALL_FUNCTION_PREFIX), "hello") == 0)
+                hello();
+            else if (std::strcmp (argv[i] + strlen (CALL_FUNCTION_PREFIX), "swap_chars") == 0)
+                swap_chars();
+            else
+            {
+                pthread_mutex_lock (&g_print_mutex);
+                printf ("unknown function: %s\n", argv[i] + strlen (CALL_FUNCTION_PREFIX));
+                pthread_mutex_unlock (&g_print_mutex);
+            }
+        }
+        else if (std::strstr (argv[i], THREAD_PREFIX))
+        {
+            // Check if we're creating a new thread.
+            if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_NEW))
+            {
+                // Create a new thread.
+                pthread_t new_thread;
+                const int err = ::pthread_create (&new_thread, nullptr, thread_func, nullptr);
+                if (err)
+                {
+                    fprintf (stderr, "pthread_create() failed with error code %d\n", err);
+                    exit (err);
+                }
+                threads.push_back (new_thread);
+            }
+            else if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_PRINT_IDS))
+            {
+                // Turn on thread id announcing.
+                g_print_thread_ids = true;
+
+                // And announce us.
+                pthread_mutex_lock (&g_print_mutex);
+                printf ("thread 0 id: ");
+                print_thread_id ();
+                printf ("\n");
+                pthread_mutex_unlock (&g_print_mutex);
+            }
+            else if (std::strstr (argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_SEGFAULT))
+            {
+                g_threads_do_segfault = true;
+            }
+            else
+            {
+                // At this point we don't do anything else with threads.
+                // Later use thread index and send command to thread.
+            }
+        }
+        else
+        {
+            // Treat the argument as text for stdout.
+            printf("%s\n", argv[i]);
+        }
+    }
+
+    // If we launched any threads, join them
+    for (std::vector<pthread_t>::iterator it = threads.begin (); it != threads.end (); ++it)
+    {
+        void *thread_retval = nullptr;
+        const int err = ::pthread_join (*it, &thread_retval);
+        if (err != 0)
+            fprintf (stderr, "pthread_join() failed with error code %d\n", err);
+    }
+
+    return return_value;
+}
Index: test/tools/lldb-server/socket_packet_pump.py
===================================================================
--- /dev/null
+++ test/tools/lldb-server/socket_packet_pump.py
@@ -0,0 +1,180 @@
+import Queue
+import re
+import select
+import threading
+import traceback
+
+def _handle_output_packet_string(packet_contents):
+    if (not packet_contents) or (len(packet_contents) < 1):
+        return None
+    elif packet_contents[0] != "O":
+        return None
+    elif packet_contents == "OK":
+        return None
+    else:
+        return packet_contents[1:].decode("hex")
+
+def _dump_queue(the_queue):
+    while not the_queue.empty():
+        print the_queue.get(True)
+    print "\n"
+
+class SocketPacketPump(object):
+    """A threaded packet reader that partitions packets into two streams.
+
+    All incoming $O packet content is accumulated with the current accumulation
+    state put into the OutputQueue.
+
+    All other incoming packets are placed in the packet queue.
+
+    A select thread can be started and stopped, and runs to place packet
+    content into the two queues.
+ """ + + _GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}') + + def __init__(self, pump_socket, logger=None): + if not pump_socket: + raise Exception("pump_socket cannot be None") + + self._output_queue = Queue.Queue() + self._packet_queue = Queue.Queue() + self._thread = None + self._stop_thread = False + self._socket = pump_socket + self._logger = logger + self._receive_buffer = "" + self._accumulated_output = "" + + def __enter__(self): + """Support the python 'with' statement. + + Start the pump thread.""" + self.start_pump_thread() + return self + + def __exit__(self, exit_type, value, the_traceback): + """Support the python 'with' statement. + + Shut down the pump thread.""" + self.stop_pump_thread() + + # Warn if there is any content left in any of the queues. + # That would represent unmatched packets. + if not self.output_queue().empty(): + print "warning: output queue entries still exist:" + _dump_queue(self.output_queue()) + print "from here:" + traceback.print_stack() + + if not self.packet_queue().empty(): + print "warning: packet queue entries still exist:" + _dump_queue(self.packet_queue()) + print "from here:" + traceback.print_stack() + + def start_pump_thread(self): + if self._thread: + raise Exception("pump thread is already running") + self._stop_thread = False + self._thread = threading.Thread(target=self._run_method) + self._thread.start() + + def stop_pump_thread(self): + self._stop_thread = True + if self._thread: + self._thread.join() + + def output_queue(self): + return self._output_queue + + def packet_queue(self): + return self._packet_queue + + def _process_new_bytes(self, new_bytes): + if not new_bytes: + return + if len(new_bytes) < 1: + return + + # Add new bytes to our accumulated unprocessed packet bytes. + self._receive_buffer += new_bytes + + # Parse fully-formed packets into individual packets. 
+        has_more = len(self._receive_buffer) > 0
+        while has_more:
+            if len(self._receive_buffer) <= 0:
+                has_more = False
+            # handle '+' ack
+            elif self._receive_buffer[0] == "+":
+                self._packet_queue.put("+")
+                self._receive_buffer = self._receive_buffer[1:]
+                if self._logger:
+                    self._logger.debug(
+                        "parsed packet from stub: +\n"
+                        + "new receive_buffer: {}".format(
+                            self._receive_buffer))
+            else:
+                packet_match = self._GDB_REMOTE_PACKET_REGEX.match(
+                    self._receive_buffer)
+                if packet_match:
+                    # Our receive buffer matches a packet at the
+                    # start of the receive buffer.
+                    new_output_content = _handle_output_packet_string(
+                        packet_match.group(1))
+                    if new_output_content:
+                        # This was an $O packet with new content.
+                        self._accumulated_output += new_output_content
+                        self._output_queue.put(self._accumulated_output)
+                    else:
+                        # Any packet other than $O.
+                        self._packet_queue.put(packet_match.group(0))
+
+                    # Remove the parsed packet from the receive
+                    # buffer.
+                    self._receive_buffer = self._receive_buffer[
+                        len(packet_match.group(0)):]
+                    if self._logger:
+                        self._logger.debug(
+                            "parsed packet from stub: "
+                            + packet_match.group(0))
+                        self._logger.debug(
+                            "new receive_buffer: "
+                            + self._receive_buffer)
+                else:
+                    # We don't have enough in the receive buffer to make a full
+                    # packet. Stop trying until we read more.
+                    has_more = False
+
+    def _run_method(self):
+        self._receive_buffer = ""
+        self._accumulated_output = ""
+
+        if self._logger:
+            self._logger.info("socket pump starting")
+
+        # Keep looping around until we're asked to stop the thread.
+        while not self._stop_thread:
+            can_read, _, _ = select.select([self._socket], [], [], 0)
+            if can_read and self._socket in can_read:
+                try:
+                    new_bytes = self._socket.recv(4096)
+                    if self._logger and new_bytes and len(new_bytes) > 0:
+                        self._logger.debug(
+                            "pump received bytes: {}".format(new_bytes))
+                except:
+                    # Likely a closed socket. Done with the pump thread.
+ if self._logger: + self._logger.debug( + "socket read failed, stopping pump read thread") + break + self._process_new_bytes(new_bytes) + + if self._logger: + self._logger.info("socket pump exiting") + + def get_accumulated_output(self): + return self._accumulated_output + + def get_receive_buffer(self): + return self._receive_buffer Index: test/tools/lldb-server/test/test_lldbgdbserverutils.py =================================================================== --- /dev/null +++ test/tools/lldb-server/test/test_lldbgdbserverutils.py @@ -0,0 +1,53 @@ +import os.path +import re +import sys + +# adjust path for embedded unittest2 +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..')) +import unittest2 + +# adjust path for lldbgdbserverutils.py +sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) +from lldbgdbserverutils import * + + +class TestLldbGdbServerUtils(unittest2.TestCase): + def test_entry_exact_payload_match(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + entry.assert_match(self, "$OK#9a") + + def test_entry_exact_payload_match_ignores_checksum(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + entry.assert_match(self, "$OK#00") + + def test_entry_creates_context(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + context = entry.assert_match(self, "$OK#9a") + self.assertIsNotNone(context) + + def test_entry_regex_matches(self): + entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), capture={ 1:"thread_id" }) + context = entry.assert_match(self, "$QC980#00") + + def test_entry_regex_saves_match(self): + entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), capture={ 1:"thread_id" }) + context = entry.assert_match(self, "$QC980#00") + self.assertEquals(context["thread_id"], "980") + + def 
test_entry_regex_expect_captures_success(self): + context = { "thread_id":"980" } + entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), expect_captures={ 2:"thread_id" }) + entry.assert_match(self, "$T11thread:980;", context=context) + + def test_entry_regex_expect_captures_raises_on_fail(self): + context = { "thread_id":"980" } + entry = GdbRemoteEntry(is_send_to_remote=False, regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), expect_captures={ 2:"thread_id" }) + try: + entry.assert_match(self, "$T11thread:970;", context=context) + self.fail() + except AssertionError: + # okay + return None + +if __name__ == '__main__': + unittest2.main()