diff --git a/openmp/libompd/CMakeLists.txt b/openmp/libompd/CMakeLists.txt
--- a/openmp/libompd/CMakeLists.txt
+++ b/openmp/libompd/CMakeLists.txt
@@ -11,4 +11,7 @@
 if(LIBOMP_OMPD_SUPPORT)
   set(OMPD_INCLUDE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/src/)
   add_subdirectory(src)
+  if(NOT DISABLE_OMPD_GDB_PLUGIN)
+    add_subdirectory(gdb-plugin)
+  endif()
 endif()
diff --git a/openmp/libompd/gdb-plugin/CMakeLists.txt b/openmp/libompd/gdb-plugin/CMakeLists.txt
new file mode 100644
--- /dev/null
+++ b/openmp/libompd/gdb-plugin/CMakeLists.txt
@@ -0,0 +1,43 @@
+#
+#//===----------------------------------------------------------------------===//
+#//
+#// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+#// See https://llvm.org/LICENSE.txt for license information.
+#// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#//
+#//===----------------------------------------------------------------------===//
+#
+
+set (CMAKE_MODULE_PATH
+  "${CMAKE_SOURCE_DIR}/libompd/"
+  ${CMAKE_MODULE_PATH}
+)
+
+find_package (Python3 COMPONENTS Interpreter Development)
+find_package (PythonLibs REQUIRED)
+
+include_directories (${OMPD_INCLUDE_PATH})
+include_directories (${LIBOMP_INCLUDE_DIR})
+add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/python-module/ompd/__init__.py
+  DEPENDS ompdModule.c ompdAPITests.c ompd/frame_filter.py ompd/__init__.py ompd/ompd_address_space.py ompd/ompd_callbacks.py ompd/ompd_handles.py ompd/ompd.py
+  COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/ompd ${CMAKE_CURRENT_BINARY_DIR}/python-module/ompd/
+  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
+
+add_custom_target(ompd_gdb_plugin ALL
+  DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/python-module/ompd/__init__.py
+  COMMENT "Building the OMPD GDB plugin")
+
+add_library (ompdModule MODULE ompdModule.c ompdAPITests.c)
+include_directories (
+  ${LIBOMP_INCLUDE_DIR}
+  ${LIBOMP_SRC_DIR}
+  ${Python3_INCLUDE_DIRS}
+)
+target_link_libraries (ompdModule ${Python3_LIBRARIES})
+target_link_libraries (ompdModule ${CMAKE_DL_LIBS})
+
+set_target_properties (ompdModule PROPERTIES PREFIX "")
+set_target_properties (ompdModule PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/python-module/ompd/")
+
+install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/python-module/ompd DESTINATION ${CMAKE_INSTALL_PREFIX}/share/gdb/python/ PATTERN ompdModule.so PERMISSIONS OWNER_READ WORLD_READ GROUP_READ OWNER_EXECUTE GROUP_EXECUTE WORLD_EXECUTE)
+
diff --git a/openmp/libompd/gdb-plugin/README.txt b/openmp/libompd/gdb-plugin/README.txt
new file mode 100644
--- /dev/null
+++ b/openmp/libompd/gdb-plugin/README.txt
@@ -0,0 +1,40 @@
+Instructions to use the OpenMP-specific debugging support for debugging C/C++ OpenMP programs through the gdb plugin are as follows:
+====================================================================================================================================
+
+Add the directory containing libompd.so to LD_LIBRARY_PATH
+    $ export LD_LIBRARY_PATH= or :$LD_LIBRARY_PATH
+
+Set OMP_DEBUG to enabled
+    $ export OMP_DEBUG=enabled
+
+Compile the program to be debugged with the '-g' and '-fopenmp' options, as shown for a sample C source file xyz.c
+    $ clang -g -fopenmp xyz.c -o xyz.out
+
+NOTE:
+The program to be debugged needs to have a dynamic link dependency on 'libomp.so' for OpenMP-specific debugging to work correctly.
+The user can check this by running ldd on the generated binary, i.e. xyz.out
+
+Debug the binary xyz.out by invoking gdb with the plugin as shown below.
+Please note that the plugin '<..>/ompd/__init__.py' should be used.
+
+    $ gdb -x or ./xyz.out
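+
+For illustration only, a typical session with the plugin may look as follows;
+the exact output depends on the gdb version and the application, and each of
+these commands is described in more detail below.
+
+    (gdb) ompd init
+    (gdb) continue
+    (gdb) ompd threads
+    (gdb) ompd bt on
+    (gdb) bt
+    (gdb) ompd icvs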
+
+    - The gdb command 'help ompd' lists the subcommands available for OpenMP-specific debugging.
+    - The command 'ompd init' needs to be run first to load the libompd.so available in the $LD_LIBRARY_PATH environment variable, and to initialize the OMPD library.
+    - The 'ompd init' command starts the program run, and the program stops at a temporary breakpoint at the OpenMP internal location ompd_dll_locations_valid().
+    - The user can 'continue' from the temporary breakpoint for further debugging.
+    - The user may place breakpoints at the OpenMP internal locations 'ompd_bp_thread_begin' and 'ompd_bp_thread_end' to catch the OpenMP thread begin and thread end events.
+    - Similarly, 'ompd_bp_task_begin' and 'ompd_bp_task_end' breakpoints may be used to catch the OpenMP task begin and task end events; 'ompd_bp_parallel_begin' and 'ompd_bp_parallel_end' to catch OpenMP parallel begin and parallel end events.
+
+List of OMPD subcommands that can be used in GDB:
+    - ompd init -- Finds and initializes the OMPD library; looks for the OMPD library libompd.so under $LD_LIBRARY_PATH, and if not found, under the directory in which the OMP library libomp.so is installed.
+    - ompd icvs -- Displays the values of OpenMP Internal Control Variables.
+    - ompd parallel -- Displays the details of the current and enclosing parallel regions.
+    - ompd threads -- Provides information on threads of the current context.
+    - ompd bt [off | on | on continued] -- Sets the filtering mode for "bt" output on or off, or to trace worker threads back to master threads. When 'ompd bt on' is used, the subsequent 'bt' command filters out the OpenMP runtime frames to a large extent, displaying only the user application frames. When 'ompd bt on continued' is used, the subsequent 'bt' command shows the user application frames for the current thread, and continues to trace the thread parents, up to the master thread.
+    - ompd step -- Executes the "step" command into user application frames, skipping OpenMP runtime frames as much as possible.
+
+
+NOTES:
+    (1) Debugging code that runs on an offloading device is not supported yet.
+    (2) The OMPD plugin requires an environment with Python version 3.5 or above. The gdb that is used with the OMPD plugin also needs to be based on Python version 3.5 or above.
+
diff --git a/openmp/libompd/gdb-plugin/ompd/__init__.py b/openmp/libompd/gdb-plugin/ompd/__init__.py
new file mode 100644
--- /dev/null
+++ b/openmp/libompd/gdb-plugin/ompd/__init__.py
@@ -0,0 +1,15 @@
+import sys
+import os.path
+import traceback
+
+if __name__ == "__main__":
+    try:
+        sys.path.append(os.path.dirname(__file__))
+
+        import ompd
+        ompd.main()
+        print('OMPD GDB support loaded')
+        print('Run \'ompd init\' to start debugging')
+    except Exception as e:
+        traceback.print_exc()
+        print('Error: OMPD support could not be loaded', e)
diff --git a/openmp/libompd/gdb-plugin/ompd/frame_filter.py b/openmp/libompd/gdb-plugin/ompd/frame_filter.py
new file mode 100644
--- /dev/null
+++ b/openmp/libompd/gdb-plugin/ompd/frame_filter.py
@@ -0,0 +1,256 @@
+import gdb
+import ompdModule
+import itertools
+from gdb.FrameDecorator import FrameDecorator
+import ompd
+from ompd_handles import ompd_task, ompd_parallel, ompd_thread
+import traceback
+from tempfile import NamedTemporaryFile
+
+
+class OmpdFrameDecorator(FrameDecorator):
+
+    def __init__(self, fobj, curr_task_handle):
+        """Initializes a FrameDecorator with the given GDB Frame object. The global OMPD address space defined in
+        ompd.py is set as well.
+        """
+        super(OmpdFrameDecorator, self).__init__(fobj)
+        self.addr_space = ompd.addr_space
+        self.fobj = None
+        if isinstance(fobj, gdb.Frame):
+            self.fobj = fobj
+        elif isinstance(fobj, FrameDecorator):
+            self.fobj = fobj.inferior_frame()
+        self.curr_task_handle = curr_task_handle
+
+    def function(self):
+        """This appends to the name of a frame the information whether the task started in the frame
+        is implicit or explicit. The ICVs are evaluated to determine that.
+        """
+        name = str(self.fobj.name())
+
+        if self.curr_task_handle is None:
+            return name
+
+        icv_value = ompdModule.call_ompd_get_icv_from_scope(self.curr_task_handle, ompd.icv_map['implicit-task-var'][1], ompd.icv_map['implicit-task-var'][0])
+        if icv_value == 0:
+            name = '@thread %i: %s "#pragma omp task"' % (gdb.selected_thread().num, name)
+        elif icv_value == 1:
+            name = '@thread %i: %s "#pragma omp parallel"' % (gdb.selected_thread().num, name)
+        else:
+            name = '@thread %i: %s' % (gdb.selected_thread().num, name)
+        return name
+
+class OmpdFrameDecoratorThread(FrameDecorator):
+
+    def __init__(self, fobj):
+        """Initializes a FrameDecorator with the given GDB Frame object."""
+        super(OmpdFrameDecoratorThread, self).__init__(fobj)
+        if isinstance(fobj, gdb.Frame):
+            self.fobj = fobj
+        elif isinstance(fobj, FrameDecorator):
+            self.fobj = fobj.inferior_frame()
+
+    def function(self):
+        name = str(self.fobj.name())
+        return '@thread %i: %s' % (gdb.selected_thread().num, name)
+
+class FrameFilter():
+
+    def __init__(self, addr_space):
+        """Initializes the FrameFilter, registers it in the GDB runtime and saves the given OMPD address space capsule.
+        """
+        self.addr_space = addr_space
+        self.name = "Filter"
+        self.priority = 100
+        self.enabled = True
+        gdb.frame_filters[self.name] = self
+        self.switched_on = False
+        self.continue_to_master = False
+
+    def set_switch(self, on_off):
+        """Prints output when executing 'ompd bt on' or 'ompd bt off'.
+        """
+        self.switched_on = on_off
+        if self.switched_on:
+            print('Enabled filter for "bt" output successfully.')
+        else:
+            print('Disabled filter for "bt" output successfully.')
+
+    def set_switch_continue(self, on_off):
+        """Prints output when executing 'ompd bt on continued'.
+        """
+        self.continue_to_master = on_off
+        if self.continue_to_master:
+            print('Enabled "bt" mode that continues backtrace on to master thread for worker threads.')
+        else:
+            print('Disabled "bt" mode that continues onto master thread.')
+
+    def get_master_frames_for_worker(self, past_thread_num, latest_sp):
+        """Prints master frames for worker thread with id past_thread_num.
+        """
+        gdb.execute('t 1')
+        gdb.execute('ompd bt on')
+        gdb.execute('bt')
+
+        frame = gdb.newest_frame()
+
+        while frame.older() is not None:
+            print('master frame sp:', str(frame.read_register('sp')))
+            # OmpdFrameDecorator requires a task handle; none is known at this point
+            yield OmpdFrameDecorator(frame, None)
+            frame = frame.older()
+        print('latest sp:', str(latest_sp))
+
+        gdb.execute('ompd bt on continued')
+        gdb.execute('t %d' % int(past_thread_num))
+
+
+    def filter_frames(self, frame_iter):
+        """Iterates through frames and only returns those that are relevant to the application
+        being debugged. The OmpdFrameDecorator is applied automatically.
+ """ + curr_thread_num = gdb.selected_thread().num + is_no_omp_thread = False + if curr_thread_num in self.addr_space.threads: + curr_thread_obj = self.addr_space.threads[curr_thread_num] + self.curr_task = curr_thread_obj.get_current_task() + self.frames = self.curr_task.get_task_frame() + else: + is_no_omp_thread = True + print('Thread %d is no OpenMP thread, printing all frames:' % curr_thread_num) + + stop_iter = False + for x in frame_iter: + if is_no_omp_thread: + yield OmpdFrameDecoratorThread(x) + continue + + if x.inferior_frame().older() is None: + continue + if self.curr_task.task_handle is None: + continue + + gdb_sp = int(str(x.inferior_frame().read_register('sp')), 16) + gdb_sp_next_new = int(str(x.inferior_frame()).split(",")[0].split("=")[1], 16) + if x.inferior_frame().older(): + gdb_sp_next = int(str(x.inferior_frame().older().read_register('sp')), 16) + else: + gdb_sp_next = int(str(x.inferior_frame().read_register('sp')), 16) + while(1): + (ompd_enter_frame, ompd_exit_frame) = self.frames + + if (ompd_enter_frame != 0 and gdb_sp_next_new < ompd_enter_frame): + break + if (ompd_exit_frame != 0 and gdb_sp_next_new < ompd_exit_frame): + if x.inferior_frame().older().older() and int(str(x.inferior_frame().older().older().read_register('sp')), 16) < ompd_exit_frame: + if self.continue_to_master: + yield OmpdFrameDecoratorThread(x) + else: + yield OmpdFrameDecorator(x, self.curr_task.task_handle) + else: + yield OmpdFrameDecorator(x, self.curr_task.task_handle) + break + sched_task_handle = self.curr_task.get_scheduling_task_handle() + + if(sched_task_handle is None): + stop_iter = True + break + + self.curr_task = self.curr_task.get_scheduling_task() + self.frames = self.curr_task.get_task_frame() + if stop_iter: + break + + # implementation of "ompd bt continued" + if self.continue_to_master: + + orig_thread = gdb.selected_thread().num + gdb_threads = dict([(t.num, t) for t in gdb.selected_inferior().threads()]) + + # iterate through generating tasks until outermost task is reached + while(1): + # get OMPD thread id for master thread (systag in GDB output) + try: + master_num = self.curr_task.get_task_parallel().get_thread_in_parallel(0).get_thread_id() + except: + break + # search for thread id without the "l" for long via "thread find" and get GDB thread num from output + hex_str = str(hex(master_num)) + thread_output = gdb.execute('thread find %s' % hex_str[0:len(hex_str)-1], to_string=True).split(" ") + if thread_output[0] == "No": + raise ValueError('Master thread num could not be found!') + gdb_master_num = int(thread_output[1]) + # get task that generated last task of worker thread + try: + self.curr_task = self.curr_task.get_task_parallel().get_task_in_parallel(0).get_generating_task() + except: + break; + self.frames = self.curr_task.get_task_frame() + (enter_frame, exit_frame) = self.frames + if exit_frame == 0: + print('outermost generating task was reached') + break + + # save GDB num for worker thread to change back to it later + worker_thread = gdb.selected_thread().num + + # use InferiorThread.switch() + gdb_threads = dict([(t.num, t) for t in gdb.selected_inferior().threads()]) + gdb_threads[gdb_master_num].switch() + print('#### switching to thread %i ####' % gdb_master_num) + + frame = gdb.newest_frame() + stop_iter = False + + while(not stop_iter): + if self.curr_task.task_handle is None: + break + self.frames = self.curr_task.get_task_frame() + + while frame: + if self.curr_task.task_handle is None: + break + + gdb_sp_next_new = 
int(str(frame).split(",")[0].split("=")[1], 16) + + if frame.older(): + gdb_sp_next = int(str(frame.older().read_register('sp')), 16) + else: + gdb_sp_next = int(str(frame.read_register('sp')), 16) + + while(1): + (ompd_enter_frame, ompd_exit_frame) = self.frames + + if (ompd_enter_frame != 0 and gdb_sp_next_new < ompd_enter_frame): + break + if (ompd_exit_frame == 0 or gdb_sp_next_new < ompd_exit_frame): + if ompd_exit_frame == 0 or frame.older() and frame.older().older() and int(str(frame.older().older().read_register('sp')), 16) < ompd_exit_frame: + yield OmpdFrameDecoratorThread(frame) + else: + yield OmpdFrameDecorator(frame, self.curr_task.task_handle) + break + sched_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(self.curr_task.task_handle) + + if(sched_task_handle is None): + stop_iter = True + break + self.curr_task = self.curr_task.get_generating_task() + self.frames = self.curr_task.get_task_frame() + + frame = frame.older() + break + + gdb_threads[worker_thread].switch() + + gdb_threads[orig_thread].switch() + + + def filter(self, frame_iter): + """Function is called automatically with every 'bt' executed. If switched on, this will only let revelant frames be printed + or all frames otherwise. If switched on, a FrameDecorator will be applied to state whether '.ompd_task_entry.' refers to an + explicit or implicit task. + """ + if self.switched_on: + return self.filter_frames(frame_iter) + else: + return frame_iter diff --git a/openmp/libompd/gdb-plugin/ompd/ompd.py b/openmp/libompd/gdb-plugin/ompd/ompd.py new file mode 100644 --- /dev/null +++ b/openmp/libompd/gdb-plugin/ompd/ompd.py @@ -0,0 +1,571 @@ +import ompdModule +import gdb +import re +import traceback +from ompd_address_space import ompd_address_space +from ompd_handles import ompd_thread, ompd_task, ompd_parallel +from frame_filter import FrameFilter +from enum import Enum + + +addr_space = None +ff = None +icv_map = None +ompd_scope_map = {1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'} +in_task_function = False + +class ompd(gdb.Command): + def __init__(self): + super(ompd, self).__init__('ompd', + gdb.COMMAND_STATUS, + gdb.COMPLETE_NONE, + True) + +class ompd_init(gdb.Command): + """Find and initialize ompd library""" + + # first parameter is command-line input, second parameter is gdb-specific data + def __init__(self): + self.__doc__ = 'Find and initialize OMPD library\n usage: ompd init' + super(ompd_init, self).__init__('ompd init', + gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): + global addr_space + global ff + try: + try: + print(gdb.newest_frame()) + except: + gdb.execute("start") + try: + lib_list = gdb.parse_and_eval("(char**)ompd_dll_locations") + except gdb.error: + raise ValueError("No ompd_dll_locations symbol in execution, make sure to have an OMPD enabled OpenMP runtime"); + + while(gdb.parse_and_eval("(char**)ompd_dll_locations") == False): + gdb.execute("tbreak ompd_dll_locations_valid") + gdb.execute("continue") + + lib_list = gdb.parse_and_eval("(char**)ompd_dll_locations") + + i = 0 + while(lib_list[i]): + ret = ompdModule.ompd_open(lib_list[i].string()) + if ret == -1: + raise ValueError("Handle of OMPD library is not a valid string!") + if ret == -2: + print("ret == -2") + pass # It's ok to fail on dlopen + if ret == -3: + print("ret == -3") + pass # It's ok to fail on dlsym + if ret < -10: + raise ValueError("OMPD error code %i!" 
% (-10 - ret)) + + if ret > 0: + print("Loaded OMPD lib successfully!") + try: + addr_space = ompd_address_space() + ff = FrameFilter(addr_space) + except: + traceback.print_exc() + return + i = i+1 + + raise ValueError("OMPD library could not be loaded!") + except: + traceback.print_exc() + +class ompd_threads(gdb.Command): + """Register thread ids of current context""" + def __init__(self): + self.__doc__ = 'Provide information on threads of current context.\n usage: ompd threads' + super(ompd_threads, self).__init__('ompd threads', + gdb.COMMAND_STATUS) + + def invoke(self, arg, from_tty): + global addr_space + if init_error(): + return + addr_space.list_threads(True) + +def print_parallel_region(curr_parallel, team_size): + """Helper function for ompd_parallel_region. To print out the details of the parallel region.""" + for omp_thr in range(team_size): + thread = curr_parallel.get_thread_in_parallel(omp_thr) + ompd_state = str(addr_space.states[thread.get_state()[0]]) + ompd_wait_id = thread.get_state()[1] + task = curr_parallel.get_task_in_parallel(omp_thr) + task_func_addr = task.get_task_function() + # Get the function this addr belongs to + sal = gdb.find_pc_line(task_func_addr) + block = gdb.block_for_pc(task_func_addr) + while block and not block.function: + block = block.superblock + if omp_thr == 0: + print('%6d (master) %-37s %ld 0x%lx %-25s %-17s:%d' % \ + (omp_thr, ompd_state, ompd_wait_id, task_func_addr, \ + block.function.print_name, sal.symtab.filename, sal.line)) + else: + print('%6d %-37s %ld 0x%lx %-25s %-17s:%d' % \ + (omp_thr, ompd_state, ompd_wait_id, task_func_addr, \ + block.function.print_name, sal.symtab.filename, sal.line)) + +class ompd_parallel_region(gdb.Command): + """Parallel Region Details""" + def __init__(self): + self.__doc__ = 'Display the details of the current and enclosing parallel regions.\n usage: ompd parallel' + super(ompd_parallel_region, self).__init__('ompd parallel', + gdb.COMMAND_STATUS) + + def invoke(self, arg, from_tty): + global addr_space + if init_error(): + return + if addr_space.icv_map is None: + addr_space.get_icv_map() + if addr_space.states is None: + addr_space.enumerate_states() + curr_thread_handle = addr_space.get_curr_thread() + curr_parallel_handle = curr_thread_handle.get_current_parallel_handle() + curr_parallel = ompd_parallel(curr_parallel_handle) + while curr_parallel_handle is not None and curr_parallel is not None: + nest_level = ompdModule.call_ompd_get_icv_from_scope(curr_parallel_handle,\ + addr_space.icv_map['levels-var'][1], addr_space.icv_map['levels-var'][0]) + if nest_level == 0: + break + team_size = ompdModule.call_ompd_get_icv_from_scope(curr_parallel_handle, \ + addr_space.icv_map['team-size-var'][1], \ + addr_space.icv_map['team-size-var'][0]) + print ("") + print ("Parallel Region: Nesting Level %d: Team Size: %d" % (nest_level, team_size)) + print ("================================================") + print ("") + print ("OMP Thread Nbr Thread State Wait Id EntryAddr FuncName File:Line"); + print ("======================================================================================================"); + print_parallel_region(curr_parallel, team_size) + enclosing_parallel = curr_parallel.get_enclosing_parallel() + enclosing_parallel_handle = curr_parallel.get_enclosing_parallel_handle() + curr_parallel = enclosing_parallel + curr_parallel_handle = enclosing_parallel_handle + +class ompd_icvs(gdb.Command): + """ICVs""" + def __init__(self): + self.__doc__ = 'Display the values of the Internal 
Control Variables.\n usage: ompd icvs' + super(ompd_icvs, self).__init__('ompd icvs', + gdb.COMMAND_STATUS) + + def invoke(self, arg, from_tty): + global addr_space + global ompd_scope_map + if init_error(): + return + curr_thread_handle = addr_space.get_curr_thread() + if addr_space.icv_map is None: + addr_space.get_icv_map() + print("ICV Name Scope Value") + print("===============================================================") + + try: + for icv_name in addr_space.icv_map: + scope = addr_space.icv_map[icv_name][1] + #{1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'} + if scope == 2: + handle = addr_space.addr_space + elif scope == 3: + handle = curr_thread_handle.thread_handle + elif scope == 4: + handle = curr_thread_handle.get_current_parallel_handle() + elif scope == 6: + handle = curr_thread_handle.get_current_task_handle() + else: + raise ValueError("Invalid scope") + + if (icv_name == "nthreads-var" or icv_name == "bind-var"): + icv_value = ompdModule.call_ompd_get_icv_from_scope( + handle, scope, addr_space.icv_map[icv_name][0]) + if icv_value is None: + icv_string = ompdModule.call_ompd_get_icv_string_from_scope( \ + handle, scope, addr_space.icv_map[icv_name][0]) + print('%-31s %-26s %s' % (icv_name, ompd_scope_map[scope], icv_string)) + else: + print('%-31s %-26s %d' % (icv_name, ompd_scope_map[scope], icv_value)) + + elif (icv_name == "affinity-format-var" or icv_name == "run-sched-var" or \ + icv_name == "tool-libraries-var" or icv_name == "tool-verbose-init-var"): + icv_string = ompdModule.call_ompd_get_icv_string_from_scope( \ + handle, scope, addr_space.icv_map[icv_name][0]) + print('%-31s %-26s %s' % (icv_name, ompd_scope_map[scope], icv_string)) + else: + icv_value = ompdModule.call_ompd_get_icv_from_scope(handle, \ + scope, addr_space.icv_map[icv_name][0]) + print('%-31s %-26s %d' % (icv_name, ompd_scope_map[scope], icv_value)) + except: + traceback.print_exc() + +def curr_thread(): + """Helper function for ompd_step. 
Returns the thread object for the current thread number.""" + global addr_space + if addr_space is not None: + return addr_space.threads[int(gdb.selected_thread().num)] + return None + +class ompd_test(gdb.Command): + """Test area""" + def __init__(self): + self.__doc__ = 'Test functionalities for correctness\n usage: ompd test' + super(ompd_test, self).__init__('ompd test', + gdb.COMMAND_OBSCURE) + + def invoke(self, arg, from_tty): + global addr_space + if init_error(): + return + # get task function for current task of current thread + try: + current_thread = int(gdb.selected_thread().num) + current_thread_obj = addr_space.threads[current_thread] + task_function = current_thread_obj.get_current_task().get_task_function() + print("bt value:", int("0x0000000000400b6c",0)) + print("get_task_function value:", task_function) + + # get task function of implicit task in current parallel region for current thread + current_parallel_obj = current_thread_obj.get_current_parallel() + task_in_parallel = current_parallel_obj.get_task_in_parallel(current_thread) + task_function_in_parallel = task_in_parallel.get_task_function() + print("task_function_in_parallel:", task_function_in_parallel) + except: + print('Task function value not found for this thread') + +class ompdtestapi (gdb.Command): + """ To test API's return code """ + def __init__(self): + self.__doc__ = 'Test OMPD tool Interface APIs.\nUsage: ompdtestapi ' + super (ompdtestapi, self).__init__('ompdtestapi', gdb.COMMAND_OBSCURE) + + def invoke (self, arg, from_tty): + global addr_space + if init_error(): + print ("Error in Initialization."); + return + if not arg: + print ("No API provided to test, eg: ompdtestapi ompd_initialize") + + if arg == "ompd_get_thread_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + ompdModule.test_ompd_get_thread_handle(addr_handle, threadId) + elif arg == "ompd_get_curr_parallel_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + ompdModule.test_ompd_get_curr_parallel_handle(thread_handle) + elif arg == "ompd_get_thread_in_parallel": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + ompdModule.test_ompd_get_thread_in_parallel(parallel_handle) + elif arg == "ompd_thread_handle_compare": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + thread_handle1 = ompdModule.call_ompd_get_thread_in_parallel(parallel_handle, 1); + thread_handle2 = ompdModule.call_ompd_get_thread_in_parallel(parallel_handle, 2); + ompdModule.test_ompd_thread_handle_compare(thread_handle1, thread_handle1) + ompdModule.test_ompd_thread_handle_compare(thread_handle1, thread_handle2) + elif arg == "ompd_get_thread_id": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + ompdModule.test_ompd_get_thread_id(thread_handle) + elif arg == "ompd_rel_thread_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + 
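# release the thread handle acquired above and check the API's return code +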
ompdModule.test_ompd_rel_thread_handle(thread_handle) + elif arg == "ompd_get_enclosing_parallel_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + ompdModule.test_ompd_get_enclosing_parallel_handle(parallel_handle) + elif arg == "ompd_parallel_handle_compare": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle1 = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + parallel_handle2 = ompdModule.call_ompd_get_enclosing_parallel_handle(parallel_handle1) + ompdModule.test_ompd_parallel_handle_compare(parallel_handle1, parallel_handle1) + ompdModule.test_ompd_parallel_handle_compare(parallel_handle1, parallel_handle2) + elif arg == "ompd_rel_parallel_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + ompdModule.test_ompd_rel_parallel_handle(parallel_handle) + elif arg == "ompd_initialize": + ompdModule.test_ompd_initialize() + elif arg == "ompd_get_api_version": + ompdModule.test_ompd_get_api_version() + elif arg == "ompd_get_version_string": + ompdModule.test_ompd_get_version_string() + elif arg == "ompd_finalize": + ompdModule.test_ompd_finalize() + elif arg == "ompd_process_initialize": + ompdModule.call_ompd_initialize() + ompdModule.test_ompd_process_initialize() + elif arg == "ompd_device_initialize": + ompdModule.test_ompd_device_initialize() + elif arg == "ompd_rel_address_space_handle": + ompdModule.test_ompd_rel_address_space_handle() + elif arg == "ompd_get_omp_version": + addr_handle = addr_space.addr_space + ompdModule.test_ompd_get_omp_version(addr_handle) + elif arg == "ompd_get_omp_version_string": + addr_handle = addr_space.addr_space + ompdModule.test_ompd_get_omp_version_string(addr_handle) + elif arg == "ompd_get_curr_task_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + ompdModule.test_ompd_get_curr_task_handle(thread_handle) + elif arg == "ompd_get_task_parallel_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_get_task_parallel_handle(task_handle) + elif arg == "ompd_get_generating_task_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_get_generating_task_handle(task_handle) + elif arg == "ompd_get_scheduling_task_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_get_scheduling_task_handle(task_handle) + elif arg == "ompd_get_task_in_parallel": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = 
ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + ompdModule.test_ompd_get_task_in_parallel(parallel_handle) + elif arg == "ompd_rel_task_handle": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_rel_task_handle(task_handle) + elif arg == "ompd_task_handle_compare": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle1 = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + task_handle2 = ompdModule.call_ompd_get_generating_task_handle(task_handle1) + ompdModule.test_ompd_task_handle_compare(task_handle1, task_handle2) + ompdModule.test_ompd_task_handle_compare(task_handle2, task_handle1) + elif arg == "ompd_get_task_function": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_get_task_function(task_handle) + elif arg == "ompd_get_task_frame": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_get_task_frame(task_handle) + elif arg == "ompd_get_state": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + ompdModule.test_ompd_get_state(thread_handle) + elif arg == "ompd_get_display_control_vars": + addr_handle = addr_space.addr_space + ompdModule.test_ompd_get_display_control_vars(addr_handle) + elif arg == "ompd_rel_display_control_vars": + ompdModule.test_ompd_rel_display_control_vars() + elif arg == "ompd_enumerate_icvs": + addr_handle = addr_space.addr_space + ompdModule.test_ompd_enumerate_icvs(addr_handle) + elif arg== "ompd_get_icv_from_scope": + addr_handle = addr_space.addr_space + threadId = gdb.selected_thread().ptid[1] + thread_handle = ompdModule.get_thread_handle(threadId, addr_handle) + parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(thread_handle) + task_handle = ompdModule.call_ompd_get_curr_task_handle(thread_handle) + ompdModule.test_ompd_get_icv_from_scope_with_addr_handle(addr_handle) + ompdModule.test_ompd_get_icv_from_scope_with_thread_handle(thread_handle) + ompdModule.test_ompd_get_icv_from_scope_with_parallel_handle(parallel_handle) + ompdModule.test_ompd_get_icv_from_scope_with_task_handle(task_handle) + elif arg == "ompd_get_icv_string_from_scope": + addr_handle = addr_space.addr_space + ompdModule.test_ompd_get_icv_string_from_scope(addr_handle) + elif arg == "ompd_get_tool_data": + ompdModule.test_ompd_get_tool_data() + elif arg == "ompd_enumerate_states": + ompdModule.test_ompd_enumerate_states() + else: + print ("Invalid API.") + + + +class ompd_bt(gdb.Command): + """Turn filter for 'bt' on/off for output to only contain frames relevant to the application or all frames.""" + def __init__(self): + self.__doc__ = 'Turn filter for "bt" output on or off. 
Specify "on continued" option to trace worker threads back to master threads.\n usage: ompd bt on|on continued|off' + super(ompd_bt, self).__init__('ompd bt', + gdb.COMMAND_STACK) + + def invoke(self, arg, from_tty): + global ff + global addr_space + global icv_map + global ompd_scope_map + if init_error(): + return + if icv_map is None: + icv_map = {} + current = 0 + more = 1 + while more > 0: + tup = ompdModule.call_ompd_enumerate_icvs(addr_space.addr_space, current) + (current, next_icv, next_scope, more) = tup + icv_map[next_icv] = (current, next_scope, ompd_scope_map[next_scope]) + print('Initialized ICV map successfully for filtering "bt".') + + arg_list = gdb.string_to_argv(arg) + if len(arg_list) == 0: + print('When calling "ompd bt", you must either specify "on", "on continued" or "off". Check "help ompd".') + elif len(arg_list) == 1 and arg_list[0] == 'on': + addr_space.list_threads(False) + ff.set_switch(True) + ff.set_switch_continue(False) + elif arg_list[0] == 'on' and arg_list[1] == 'continued': + ff.set_switch(True) + ff.set_switch_continue(True) + elif len(arg_list) == 1 and arg_list[0] == 'off': + ff.set_switch(False) + ff.set_switch_continue(False) + else: + print('When calling "ompd bt", you must either specify "on", "on continued" or "off". Check "help ompd".') + +# TODO: remove +class ompd_taskframes(gdb.Command): + """Prints task handles for relevant task frames. Meant for debugging.""" + def __init__(self): + self.__doc__ = 'Prints list of tasks.\nUsage: ompd taskframes' + super(ompd_taskframes, self).__init__('ompd taskframes', + gdb.COMMAND_STACK) + + def invoke(self, arg, from_tty): + global addr_space + if init_error(): + return + frame = gdb.newest_frame() + while(frame): + print (frame.read_register('sp')) + frame = frame.older() + curr_task_handle = None + if(addr_space.threads and addr_space.threads.get(gdb.selected_thread().num)): + curr_thread_handle = curr_thread().thread_handle + curr_task_handle = ompdModule.call_ompd_get_curr_task_handle(curr_thread_handle) + if(not curr_task_handle): + return None + prev_frames = None + try: + while(1): + frames_with_flags = ompdModule.call_ompd_get_task_frame(curr_task_handle) + frames = (frames_with_flags[0], frames_with_flags[3]) + if(prev_frames == frames): + break + if(not isinstance(frames,tuple)): + break + (ompd_enter_frame, ompd_exit_frame) = frames + print(hex(ompd_enter_frame), hex(ompd_exit_frame)) + curr_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(curr_task_handle) + prev_frames = frames + if(not curr_task_handle): + break + except: + traceback.print_exc() + +def print_and_exec(string): + """Helper function for ompd_step. Executes the given command in GDB and prints it.""" + print(string) + gdb.execute(string) + +class TempFrameFunctionBp(gdb.Breakpoint): + """Helper class for ompd_step. Defines stop function for breakpoint on frame function.""" + def stop(self): + global in_task_function + in_task_function = True + self.enabled = False + +class ompd_step(gdb.Command): + """Executes 'step' and skips frames irrelevant to the application / the ones without debug information.""" + def __init__(self): + self.__doc__ = 'Executes "step" and skips runtime frames as much as possible.' + super(ompd_step, self).__init__('ompd step', gdb.COMMAND_STACK) + + class TaskBeginBp(gdb.Breakpoint): + """Helper class. 
Defines stop function for breakpoint ompd_bp_task_begin.""" + def stop(self): + try: + code_line = curr_thread().get_current_task().get_task_function() + frame_fct_bp = TempFrameFunctionBp(('*%i' % code_line), temporary=True, internal=True) + frame_fct_bp.thread = self.thread + return False + except: + return False + + def invoke(self, arg, from_tty): + global in_task_function + if init_error(): + return + tbp = self.TaskBeginBp('ompd_bp_task_begin', temporary=True, internal=True) + tbp.thread = int(gdb.selected_thread().num) + print_and_exec('step') + while gdb.selected_frame().find_sal().symtab is None: + if not in_task_function: + print_and_exec('finish') + else: + print_and_exec('si') + +def init_error(): + global addr_space + if (gdb.selected_thread() is None) or (addr_space is None) or (not addr_space): + print("Run 'ompd init' before running any of the ompd commands") + return True + return False + +def main(): + ompd() + ompd_init() + ompd_threads() + ompd_icvs() + ompd_parallel_region() + ompd_test() + ompdtestapi() + ompd_taskframes() + ompd_bt() + ompd_step() + +if __name__ == "__main__": + try: + main() + except: + traceback.print_exc() + +# NOTE: test code using: +# OMP_NUM_THREADS=... gdb a.out -x ../../projects/gdb_plugin/gdb-ompd/__init__.py +# ompd init +# ompd threads diff --git a/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py b/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py new file mode 100644 --- /dev/null +++ b/openmp/libompd/gdb-plugin/ompd/ompd_address_space.py @@ -0,0 +1,314 @@ +from __future__ import print_function +import ompdModule +from ompd_handles import ompd_thread, ompd_task, ompd_parallel +import gdb +import sys +import traceback +from enum import Enum + +class ompd_scope(Enum): + ompd_scope_global = 1 + ompd_scope_address_space = 2 + ompd_scope_thread = 3 + ompd_scope_parallel = 4 + ompd_scope_implicit_task = 5 + ompd_scope_task = 6 + +class ompd_address_space(object): + + def __init__(self): + """Initializes an ompd_address_space object by calling ompd_initialize + in ompdModule.c + """ + self.addr_space = ompdModule.call_ompd_initialize() + # maps thread_num (thread id given by gdb) to ompd_thread object with thread handle + self.threads = {} + self.states = None + self.icv_map = None + self.ompd_tool_test_bp = None + self.scope_map = {1:'global', 2:'address_space', 3:'thread', 4:'parallel', 5:'implicit_task', 6:'task'} + self.sched_map = {1:'static', 2:'dynamic', 3:'guided', 4:'auto'} + gdb.events.stop.connect(self.handle_stop_event) + self.new_thread_breakpoint = gdb.Breakpoint("ompd_bp_thread_begin", internal=True) + tool_break_symbol = gdb.lookup_global_symbol("ompd_tool_break") + if (tool_break_symbol is not None): + self.ompd_tool_test_bp = gdb.Breakpoint("ompd_tool_break", internal=True) + + def handle_stop_event(self, event): + """Sets a breakpoint at different events, e.g. when a new OpenMP + thread is created. + """ + if (isinstance(event, gdb.BreakpointEvent)): + # check if breakpoint has already been hit + if (self.new_thread_breakpoint in event.breakpoints): + self.add_thread() + gdb.execute('continue') + return + elif (self.ompd_tool_test_bp is not None and self.ompd_tool_test_bp in event.breakpoints): + try: + self.compare_ompt_data() + gdb.execute('continue') + except(): + traceback.print_exc() + elif (isinstance(event, gdb.SignalEvent)): + # TODO: what do we need to do on SIGNALS? + pass + else: + # TODO: probably not possible? + pass + + def get_icv_map(self): + """Fills ICV map. 
+ """ + self.icv_map = {} + current = 0 + more = 1 + while more > 0: + tup = ompdModule.call_ompd_enumerate_icvs(self.addr_space, current) + (current, next_icv, next_scope, more) = tup + self.icv_map[next_icv] = (current, next_scope, self.scope_map[next_scope]) + print('Initialized ICV map successfully for checking OMP API values.') + + def compare_ompt_data(self): + """Compares OMPT tool data about parallel region to data returned by OMPD functions. + """ + # make sure all threads and states are set + self.list_threads(False) + + thread_id = gdb.selected_thread().ptid[1] + curr_thread = self.get_curr_thread() + + # check if current thread is LWP thread; return if "ompd_rc_unavailable" + thread_handle = ompdModule.get_thread_handle(thread_id, self.addr_space) + if thread_handle == -1: + print("Skipping OMPT-OMPD checks for non-LWP thread.") + return + + print('Comparing OMPT data to OMPD data...') + field_names = [i.name for i in gdb.parse_and_eval('thread_data').type.fields()] + thread_data = gdb.parse_and_eval('thread_data') + + if self.icv_map is None: + self.get_icv_map() + + # compare state values + if 'ompt_state' in field_names: + if self.states is None: + self.enumerate_states() + ompt_state = str(thread_data['ompt_state']) + ompd_state = str(self.states[curr_thread.get_state()[0]]) + if ompt_state != ompd_state: + print('OMPT-OMPD mismatch: ompt_state (%s) does not match OMPD state (%s)!' % (ompt_state, ompd_state)) + + # compare wait_id values + if 'ompt_wait_id' in field_names: + ompt_wait_id = thread_data['ompt_wait_id'] + ompd_wait_id = curr_thread.get_state()[1] + if ompt_wait_id != ompd_wait_id: + print('OMPT-OMPD mismatch: ompt_wait_id (%d) does not match OMPD wait id (%d)!' % (ompt_wait_id, ompd_wait_id)) + + # compare thread id + if 'omp_thread_num' in field_names and 'thread-num-var' in self.icv_map: + ompt_thread_num = thread_data['omp_thread_num'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.thread_handle, self.icv_map['thread-num-var'][1], self.icv_map['thread-num-var'][0]) + if ompt_thread_num != icv_value: + print('OMPT-OMPD mismatch: omp_thread_num (%d) does not match OMPD thread num according to ICVs (%d)!' % (ompt_thread_num, icv_value)) + + # compare thread data + if 'ompt_thread_data' in field_names: + ompt_thread_data = thread_data['ompt_thread_data'].dereference()['value'] + ompd_value = ompdModule.call_ompd_get_tool_data(3, curr_thread.thread_handle)[0] + if ompt_thread_data != ompd_value: + print('OMPT-OMPD mismatch: value of ompt_thread_data (%d) does not match that of OMPD data union (%d)!' % (ompt_thread_data, ompd_value)) + + # compare number of threads + if 'omp_num_threads' in field_names and 'team-size-var' in self.icv_map: + ompt_num_threads = thread_data['omp_num_threads'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['team-size-var'][1], self.icv_map['team-size-var'][0]) + if ompt_num_threads != icv_value: + print('OMPT-OMPD mismatch: omp_num_threads (%d) does not match OMPD num threads according to ICVs (%d)!' % (ompt_num_threads, icv_value)) + + # compare omp level + if 'omp_level' in field_names and 'levels-var' in self.icv_map: + ompt_levels = thread_data['omp_level'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['levels-var'][1], self.icv_map['levels-var'][0]) + if ompt_levels != icv_value: + print('OMPT-OMPD mismatch: omp_level (%d) does not match OMPD levels according to ICVs (%d)!' 
% (ompt_levels, icv_value)) + + # compare active level + if 'omp_active_level' in field_names and 'active-levels-var' in self.icv_map: + ompt_active_levels = thread_data['omp_active_level'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['active-levels-var'][1], self.icv_map['active-levels-var'][0]) + if ompt_active_levels != icv_value: + print('OMPT-OMPD mismatch: active levels (%d) do not match active levels according to ICVs (%d)!' % (ompt_active_levels, icv_value)) + + # compare parallel data + if 'ompt_parallel_data' in field_names: + ompt_parallel_data = thread_data['ompt_parallel_data'].dereference()['value'] + current_parallel_handle = curr_thread.get_current_parallel_handle() + ompd_value = ompdModule.call_ompd_get_tool_data(4, current_parallel_handle)[0] + if ompt_parallel_data != ompd_value: + print('OMPT-OMPD mismatch: value of ompt_parallel_data (%d) does not match that of OMPD data union (%d)!' % (ompt_parallel_data, ompd_value)) + + # compare max threads + if 'omp_max_threads' in field_names and 'nthreads-var' in self.icv_map: + ompt_max_threads = thread_data['omp_max_threads'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.thread_handle, self.icv_map['nthreads-var'][1], self.icv_map['nthreads-var'][0]) + if icv_value is None: + icv_string = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.thread_handle, self.icv_map['nthreads-var'][1], self.icv_map['nthreads-var'][0]) + if icv_string is None: + print('OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (None Object)' % (ompt_max_threads)) + else: + if ompt_max_threads != int(icv_string.split(',')[0]): + print('OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (%d)!' % (ompt_max_threads, int(icv_string.split(',')[0]))) + else: + if ompt_max_threads != icv_value: + print('OMPT-OMPD mismatch: omp_max_threads (%d) does not match OMPD thread limit according to ICVs (%d)!' % (ompt_max_threads, icv_value)) + + # compare omp_parallel + # NOTE: omp_parallel = true if active-levels-var > 0 + if 'omp_parallel' in field_names: + ompt_parallel = thread_data['omp_parallel'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_parallel_handle(), self.icv_map['active-levels-var'][1], self.icv_map['active-levels-var'][0]) + if ompt_parallel == 1 and icv_value <= 0 or ompt_parallel == 0 and icv_value > 0: + print('OMPT-OMPD mismatch: ompt_parallel (%d) does not match OMPD parallel according to ICVs (%d)!' % (ompt_parallel, icv_value)) + + # compare omp_final + if 'omp_final' in field_names and 'final-task-var' in self.icv_map: + ompt_final = thread_data['omp_final'] + current_task_handle = curr_thread.get_current_task_handle() + icv_value = ompdModule.call_ompd_get_icv_from_scope(current_task_handle, self.icv_map['final-task-var'][1], self.icv_map['final-task-var'][0]) + if icv_value != ompt_final: + print('OMPT-OMPD mismatch: omp_final (%d) does not match OMPD final according to ICVs (%d)!' % (ompt_final, icv_value)) + + # compare omp_dynamic + if 'omp_dynamic' in field_names and 'dyn-var' in self.icv_map: + ompt_dynamic = thread_data['omp_dynamic'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.thread_handle, self.icv_map['dyn-var'][1], self.icv_map['dyn-var'][0]) + if icv_value != ompt_dynamic: + print('OMPT-OMPD mismatch: omp_dynamic (%d) does not match OMPD dynamic according to ICVs (%d)!' 
% (ompt_dynamic, icv_value)) + + # compare omp_max_active_levels + if 'omp_max_active_levels' in field_names and 'max-active-levels-var' in self.icv_map: + ompt_max_active_levels = thread_data['omp_max_active_levels'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_task_handle(), self.icv_map['max-active-levels-var'][1], self.icv_map['max-active-levels-var'][0]) + if ompt_max_active_levels != icv_value: + print('OMPT-OMPD mismatch: omp_max_active_levels (%d) does not match OMPD max active levels (%d)!' % (ompt_max_active_levels, icv_value)) + + # compare omp_kind: TODO: Add the test for monotonic/nonmonotonic modifier + if 'omp_kind' in field_names and 'run-sched-var' in self.icv_map: + ompt_sched_kind = thread_data['omp_kind'] + icv_value = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.get_current_task_handle(), self.icv_map['run-sched-var'][1], self.icv_map['run-sched-var'][0]) + ompd_sched_kind = icv_value.split(',')[0] + if self.sched_map.get(int(ompt_sched_kind)) != ompd_sched_kind: + print('OMPT-OMPD mismatch: omp_kind kind (%s) does not match OMPD schedule kind according to ICVs (%s)!' % (self.sched_map.get(int(ompt_sched_kind)), ompd_sched_kind)) + + # compare omp_modifier + if 'omp_modifier' in field_names and 'run-sched-var' in self.icv_map: + ompt_sched_mod = thread_data['omp_modifier'] + icv_value = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.get_current_task_handle(), self.icv_map['run-sched-var'][1], self.icv_map['run-sched-var'][0]) + token = icv_value.split(',')[1] + if token is not None: + ompd_sched_mod = int(token) + else: + ompd_sched_mod = 0 + if ompt_sched_mod != ompd_sched_mod: + print('OMPT-OMPD mismatch: omp_kind modifier does not match OMPD schedule modifier according to ICVs!') + + # compare omp_proc_bind + if 'omp_proc_bind' in field_names and 'bind-var' in self.icv_map: + ompt_proc_bind = thread_data['omp_proc_bind'] + icv_value = ompdModule.call_ompd_get_icv_from_scope(curr_thread.get_current_task_handle(), self.icv_map['bind-var'][1], self.icv_map['bind-var'][0]) + if icv_value is None: + icv_string = ompdModule.call_ompd_get_icv_string_from_scope(curr_thread.get_current_task_handle(), self.icv_map['bind-var'][1], self.icv_map['bind-var'][0]) + if icv_string is None: + print('OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (None Object)' % (ompt_proc_bind)) + else: + if ompt_proc_bind != int(icv_string.split(',')[0]): + print('OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (%d)!' % (ompt_proc_bind, int(icv_string.split(',')[0]))) + else: + if ompt_proc_bind != icv_value: + print('OMPT-OMPD mismatch: omp_proc_bind (%d) does not match OMPD proc bind according to ICVs (%d)!' % (ompt_proc_bind, icv_value)) + + # compare enter and exit frames + if 'ompt_frame_list' in field_names: + ompt_task_frame_dict = thread_data['ompt_frame_list'].dereference() + ompt_task_frames = (int(ompt_task_frame_dict['enter_frame'].cast(gdb.lookup_type('long'))), int(ompt_task_frame_dict['exit_frame'].cast(gdb.lookup_type('long')))) + current_task = curr_thread.get_current_task() + ompd_task_frames = current_task.get_task_frame() + if ompt_task_frames != ompd_task_frames: + print('OMPT-OMPD mismatch: ompt_task_frames (%s) do not match OMPD task frames (%s)!' 
% (ompt_task_frames, ompd_task_frames)) + + # compare task data + if 'ompt_task_data' in field_names: + ompt_task_data = thread_data['ompt_task_data'].dereference()['value'] + current_task_handle = curr_thread.get_current_task_handle() + ompd_value = ompdModule.call_ompd_get_tool_data(6, current_task_handle)[0] + if ompt_task_data != ompd_value: + print('OMPT-OMPD mismatch: value of ompt_task_data (%d) does not match that of OMPD data union (%d)!' % (ompt_task_data, ompd_value)) + + def save_thread_object(self, thread_num, thread_id, addr_space): + """Saves thread object for thread_num inside threads dictionary. + """ + thread_handle = ompdModule.get_thread_handle(thread_id, addr_space) + self.threads[int(thread_num)] = ompd_thread(thread_handle) + + def get_thread(self, thread_num): + """ Get thread object from map. + """ + return self.threads[int(thread_num)] + + def get_curr_thread(self): + """ Get current thread object from map or add new one to map, if missing. + """ + thread_num = int(gdb.selected_thread().num) + if thread_num not in self.threads: + self.add_thread() + return self.threads[thread_num] + + def add_thread(self): + """Add currently selected (*) thread to dictionary threads. + """ + inf_thread = gdb.selected_thread() + try: + self.save_thread_object(inf_thread.num, inf_thread.ptid[1], self.addr_space) + except: + traceback.print_exc() + + def list_threads(self, verbose): + """Prints OpenMP threads only that are being tracking inside the "threads" dictionary. + See handle_stop_event and add_thread. + """ + list_tids = [] + curr_inferior = gdb.selected_inferior() + + for inf_thread in curr_inferior.threads(): + list_tids.append((inf_thread.num, inf_thread.ptid)) + if verbose: + if self.states is None: + self.enumerate_states() + for (thread_num, thread_ptid) in sorted(list_tids): + if thread_num in self.threads: + try: + print('Thread %i (%i) is an OpenMP thread; state: %s' % (thread_num, thread_ptid[1], self.states[self.threads[thread_num].get_state()[0]])) + except: + traceback.print_exc() + else: + print('Thread %i (%i) is no OpenMP thread' % (thread_num, thread_ptid[1])) + + def enumerate_states(self): + """Helper function for list_threads: initializes map of OMPD states for output of + 'ompd threads'. + """ + if self.states is None: + self.states = {} + current = int("0x102", 0) + count = 0 + more = 1 + + while more > 0: + tup = ompdModule.call_ompd_enumerate_states(self.addr_space, current) + (next_state, next_state_name, more) = tup + + self.states[next_state] = next_state_name + current = next_state diff --git a/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py b/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py new file mode 100644 --- /dev/null +++ b/openmp/libompd/gdb-plugin/ompd/ompd_callbacks.py @@ -0,0 +1,96 @@ +import gdb +import os +import re +import traceback +import sys + +""" This module evaluates function parameters of those OMPD callbacks that need GDB API calls. +""" + +""" Have the debugger print a string. +""" +def _print(*args): + # args is a tuple with just one string element + print_string = args[0] + gdb.execute('printf "%s\n"' % args[0]) + +""" Look up the address of a global symbol in the target. +""" +def _sym_addr(*args): + # args is a tuple consisting of thread_id and symbol_name + thread_id = args[0] + symbol_name = args[1] + if(thread_id >= 0): + gdb.execute('thread %d\n' % thread_id, to_string=True) + return int(gdb.parse_and_eval("&"+symbol_name)) + +""" Read string from the target and copy it into the provided buffer. 
+""" +def _read_string(*args): + # args is a tuple with just the source address + addr = args[0] + try: + buf = gdb.parse_and_eval('(unsigned char*)%li' % addr).string() + except: + traceback.print_exc() + return buf + +""" Read memory from the target and copy it into the provided buffer. +""" +def _read(*args): + # args is a tuple consisting of address and number of bytes to be read + addr = args[0] + nbytes = args[1] +# print("_read(%i,%i)"%(addr, nbytes)) + ret_buf = bytearray() +# try: + buf = gdb.parse_and_eval('(unsigned char*)%li' % addr) + for i in range(nbytes): + ret_buf.append(int(buf[i])) +# except: +# traceback.print_exc() + return ret_buf + + +""" Get thread-specific context. +Return -1 if no match is found. +""" +def _thread_context(*args): + # args is a tuple consisting of thread_id and the thread kind + thread_id = args[1] + pthread = False + lwp = False + if args[0] == 0: + pthread = True + else: + lwp = True + info = gdb.execute('info threads', to_string=True).splitlines() + + for line in info: + if pthread: + m = re.search(r'(0x[a-fA-F0-9]+)', line) + elif lwp: + m = re.search(r'\([^)]*?(\d+)[^)]*?\)', line) + if m == None: + continue + pid = int(m.group(1),0) + if pid == thread_id: + return int(line[2:6],0) + return -1 + +""" Test info threads / list threads / how to split output to get thread id +and its size. +""" +def _test_threads(*args): + info = gdb.execute('info threads', to_string=True).splitlines() + for line in info[1:]: + content = line.split() + thread_id = None + # fetch pointer to id + if(content[0].startswith('*')): + thread_id = (content[3]) + else: + thread_id = (content[2]) + sizeof_tid = sys.getsizeof(thread_id) + print(sizeof_tid) + print(info) diff --git a/openmp/libompd/gdb-plugin/ompd/ompd_handles.py b/openmp/libompd/gdb-plugin/ompd/ompd_handles.py new file mode 100644 --- /dev/null +++ b/openmp/libompd/gdb-plugin/ompd/ompd_handles.py @@ -0,0 +1,178 @@ +import ompdModule +import imp + +class ompd_parallel(object): + + def __init__(self, parallel_handle): + """ Initializes an ompd_parallel object with the pointer + to a handle of a parallel region.""" + self.parallel_handle = parallel_handle + self.threads = {} + self.itasks = {} + self.enclosing_parallel_handle = None + self.enclosing_parallel = False + self.task_handle = None + + def get_thread_in_parallel(self, thread_num): + """Obtains thread handles for the threads associated with the + parallel region specified by parallel_handle.""" + if not thread_num in self.threads: + thread_handle = ompdModule.call_ompd_get_thread_in_parallel(self.parallel_handle, thread_num) + self.threads[thread_num] = ompd_thread(thread_handle) + return self.threads[thread_num] + + def get_enclosing_parallel_handle(self): + """Obtains a parallel handle for the parallel region enclosing + the parallel region specified by parallel_handle.""" + if not self.enclosing_parallel_handle: + self.enclosing_parallel_handle = ompdModule.call_ompd_get_enclosing_parallel_handle(self.parallel_handle) + return self.enclosing_parallel_handle + + def get_enclosing_parallel(self): + if not self.enclosing_parallel: + self.enclosing_parallel = ompd_parallel(self.get_enclosing_parallel_handle()) + return self.enclosing_parallel + + def get_task_in_parallel(self, thread_num): + """Obtains handles for the implicit tasks associated with the + parallel region specified by parallel_handle.""" + if not thread_num in self.itasks: + task_handle = ompdModule.call_ompd_get_task_in_parallel(self.parallel_handle, thread_num) + self.itasks[thread_num] 
= ompd_task(task_handle) + return self.itasks[thread_num] + + def __del__(self): + """Releases the parallel handle.""" + pass # let capsule destructors do the job + +class ompd_task(object): + + def __init__(self, task_handle): + """Initializes a new ompd_task_handle object and sets the attribute + to the task handle specified.""" + self.task_handle = task_handle + self.task_parallel_handle = False + self.generating_task_handle = False + self.scheduling_task_handle = False + self.task_parallel = False + self.generating_task = False + self.scheduling_task = False + self.task_frames = None + self.task_frame_flags = None + + def get_task_parallel_handle(self): + """Obtains a task parallel handle for the parallel region enclosing + the task region specified.""" + if not self.task_parallel_handle: + self.task_parallel_handle = ompdModule.call_ompd_get_task_parallel_handle(self.task_handle) + return self.task_parallel_handle + + def get_task_parallel(self): + if not self.task_parallel: + self.task_parallel = ompd_parallel(self.get_task_parallel_handle()) + return self.task_parallel + + def get_generating_task_handle(self): + """Obtains the task handle for the task that created the task specified + by the task handle.""" + if not self.generating_task_handle: + self.generating_task_handle = ompdModule.call_ompd_get_generating_task_handle(self.task_handle) + return self.generating_task_handle + + def get_generating_task(self): + if not self.generating_task: + self.generating_task = ompd_task(ompdModule.call_ompd_get_generating_task_handle(self.task_handle)) + return self.generating_task + + def get_scheduling_task_handle(self): + """Obtains the task handle for the task that scheduled the task specified.""" + if not self.scheduling_task_handle: + self.scheduling_task_handle = ompdModule.call_ompd_get_scheduling_task_handle(self.task_handle) + return self.scheduling_task_handle + + def get_scheduling_task(self): + """Returns ompd_task object for the task that scheduled the current task.""" + if not self.scheduling_task: + self.scheduling_task = ompd_task(self.get_scheduling_task_handle()) + return self.scheduling_task + + def get_task_function(self): + """Returns long with address of function entry point.""" + return ompdModule.call_ompd_get_task_function(self.task_handle) + + def get_task_frame_with_flags(self): + """Returns enter frame address and flag, exit frame address and flag for current task handle.""" + if self.task_frames is None or self.task_frame_flags is None: + ret_value = ompdModule.call_ompd_get_task_frame(self.task_handle) + if isinstance(ret_value, tuple): + self.task_frames = (ret_value[0], ret_value[2]) + self.task_frame_flags = (ret_value[1], ret_value[3]) + else: + return ret_value + return (self.task_frames[0], self.task_frame_flags[0], self.task_frames[1], self.task_frame_flags[1]) + + def get_task_frame(self): + """Returns enter and exit frame address for current task handle.""" + if self.task_frames is None: + ret_value = ompdModule.call_ompd_get_task_frame(self.task_handle) + if isinstance(ret_value, tuple): + self.task_frames = (ret_value[0], ret_value[2]) + else: + return ret_value + return self.task_frames + + def __del__(self): + """Releases the task handle.""" + pass # let capsule destructors do the job + + +class ompd_thread(object): + + def __init__(self, thread_handle): + """Initializes an ompd_thread with the data received from + GDB.""" + self.thread_handle = thread_handle + self.parallel_handle = None + self.task_handle = None + self.current_task = False + 
self.current_parallel = False + self.thread_id = False + + def get_current_parallel_handle(self): + """Obtains the parallel handle for the parallel region associated with + the given thread handle.""" + #TODO: invalidate thread objects based on `gdb.event.cont`. This should invalidate all internal state. + self.parallel_handle = ompdModule.call_ompd_get_curr_parallel_handle(self.thread_handle) + return self.parallel_handle + + def get_current_parallel(self): + """Returns parallel object for parallel handle of the parallel region + associated with the current thread handle.""" + if not self.current_parallel: + self.current_parallel = ompd_parallel(self.get_current_parallel_handle()) + return self.current_parallel + + def get_current_task_handle(self): + """Obtains the task handle for the current task region of the + given thread.""" + return ompdModule.call_ompd_get_curr_task_handle(self.thread_handle) + + def get_thread_id(self): + """Obtains the ID for the given thread.""" + if not self.thread_id: + self.thread_id = ompdModule.call_ompd_get_thread_id(self.thread_handle) + return self.thread_id + + def get_current_task(self): + """Returns task object for task handle of the current task region.""" + return ompd_task(self.get_current_task_handle()) + + def get_state(self): + """Returns tuple with OMPD state (long) and wait_id, in case the thread is in a + waiting state. Helper function for 'ompd threads' command.""" + (state, wait_id) = ompdModule.call_ompd_get_state(self.thread_handle) + return (state, wait_id) + + def __del__(self): + """Releases the given thread handle.""" + pass # let capsule destructors do the job diff --git a/openmp/libompd/gdb-plugin/ompdAPITests.c b/openmp/libompd/gdb-plugin/ompdAPITests.c new file mode 100644 --- /dev/null +++ b/openmp/libompd/gdb-plugin/ompdAPITests.c @@ -0,0 +1,2595 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +extern void *ompd_library; + +struct _ompd_aspace_cont { + int id; +}; +struct _ompd_thread_cont { + int id; +}; +ompd_address_space_context_t context = {42}; +ompd_address_space_context_t invalidcontext = {99}; + +// call back functions for ompd_initialize +ompd_rc_t _alloc(ompd_size_t bytes, void **ptr); +ompd_rc_t _free(void *ptr); +ompd_rc_t _sizes(ompd_address_space_context_t *_acontext, + ompd_device_type_sizes_t *sizes); +ompd_rc_t _sym_addr(ompd_address_space_context_t *context, + ompd_thread_context_t *tcontext, const char *symbol_name, + ompd_address_t *symbol_addr, const char *file_name); +ompd_rc_t _read(ompd_address_space_context_t *context, + ompd_thread_context_t *tcontext, const ompd_address_t *addr, + ompd_size_t nbytes, void *buffer); +ompd_rc_t _read_string(ompd_address_space_context_t *context, + ompd_thread_context_t *tcontext, + const ompd_address_t *addr, ompd_size_t nbytes, + void *buffer); +ompd_rc_t _endianess(ompd_address_space_context_t *address_space_context, + const void *input, ompd_size_t unit_size, + ompd_size_t count, void *output); +ompd_rc_t _thread_context(ompd_address_space_context_t *context, + ompd_thread_id_t kind, ompd_size_t sizeof_thread_id, + const void *thread_id, + ompd_thread_context_t **thread_context); +ompd_rc_t _print(const char *str, int category); + +/* + Test API: ompd_get_thread_handle + + ompdtestapi threadandparallel + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + c + ompdtestapi ompd_get_thread_handle + + for ompd_rc_unavailable: + ompd init + ompdtestapi ompd_get_thread_handle +*/ + +PyObject *test_ompd_get_thread_handle(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_thread_handle\"...\n"); + + PyObject *addrSpaceTup = PyTuple_GetItem(args, 0); + ompd_address_space_handle_t *addr_handle = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup, + "AddressSpace"); + + PyObject *threadIdTup = PyTuple_GetItem(args, 1); + uint64_t threadID = (uint64_t)PyLong_AsLong(threadIdTup); + + ompd_size_t sizeof_thread_id = sizeof(threadID); + ompd_thread_handle_t *thread_handle; + + // should be successful + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_thread_handle( + addr_handle, 1 /*lwp*/, sizeof_thread_id, &threadID, &thread_handle); + + if (rc == ompd_rc_unavailable) { + // ompd_rc_unavailable if the thread is not an OpenMP thread. + printf("Success. ompd_rc_unavailable, OpenMP is disabled.\n"); + printf("This is not a Parallel Region, No more testing is possible.\n"); + return Py_None; + } else if (rc != ompd_rc_ok) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // as in ompd-types.h, only 0-3 are valid for thread kind + // ompd_rc_unsupported if thread kind is not supported. + printf("Test: Unsupported thread kind.\n"); + rc = ompd_get_thread_handle(addr_handle, 4, sizeof_thread_id, &threadID, + &thread_handle); + if (rc != ompd_rc_unsupported) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // ompd_rc_bad_input: if a different value in sizeof_thread_id is expected for + // a thread kind. + // sizeof_thread_id is validated at thread_context which is call back function + // "_thread_context" where we expect size to be sizeof(long int) + printf("Test: Wrong value for sizeof threadID.\n"); + rc = ompd_get_thread_handle(addr_handle, 1 /*lwp*/, sizeof_thread_id - 1, + &threadID, &thread_handle); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL thread_handle.\n"); + rc = ompd_get_thread_handle(addr_handle, 1 /*lwp*/, sizeof_thread_id, + &threadID, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf( + "Test: Expecting ompd_rc_error or stale_handle for NULL addr_handle.\n"); + rc = ompd_get_thread_handle(NULL, 1 /*lwp*/, sizeof_thread_id, &threadID, + &thread_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_curr_parallel_handle. + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + omptestapi ompd_get_curr_parallel_handle + + for ompd_rc_unavailable + ompd init + omptestapi ompd_get_curr_parallel_handle (or break at line 4 + before this) +*/ + +PyObject *test_ompd_get_curr_parallel_handle(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_curr_parallel_handle\"...\n"); + + PyObject *threadHandlePy = PyTuple_GetItem(args, 0); + ompd_thread_handle_t *thread_handle = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy, + "ThreadHandle")); + + ompd_parallel_handle_t *parallel_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_curr_parallel_handle(thread_handle, ¶llel_handle); + if (rc == ompd_rc_unavailable) { + // ompd_rc_unavailable if the thread is not currently part of a team + + // ToCheck: Even in non parallel region, error code is stale_handle + // Need to find a test case for ompd_rc_unavailable ????? + printf("Success. ompd_rc_unavailable, Not in parallel region\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc == ompd_rc_stale_handle) { + printf("Return code is stale_handle, may be in non-parallel region.\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc != ompd_rc_ok) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL parallel_handle.\n"); + rc = ompd_get_curr_parallel_handle(thread_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "thread_handle.\n"); + rc = ompd_get_curr_parallel_handle(NULL, ¶llel_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_thread_in_parallel. + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(3); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + omptestapi ompd_get_thread_in_parallel +*/ +PyObject *test_ompd_get_thread_in_parallel(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_thread_in_parallel\"...\n"); + + PyObject *parallelHandlePy = PyTuple_GetItem(args, 0); + ompd_parallel_handle_t *parallel_handle = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy, + "ParallelHandle")); + ompd_thread_handle_t *thread_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_thread_in_parallel( + parallel_handle, 1 /* lesser than team-size-var*/, &thread_handle); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // ompd_rc_bad_input: if the thread_num argument is greater than or equal to + // the team-size-var ICV or negative + printf("Test: Invalid thread num (199).\n"); + rc = ompd_get_thread_in_parallel(parallel_handle, 199, &thread_handle); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Invalid thread num (-5).\n"); + rc = ompd_get_thread_in_parallel(parallel_handle, -5, &thread_handle); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL thread_handle.\n"); + rc = ompd_get_thread_in_parallel(parallel_handle, 1, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "parallel_handle.\n"); + rc = ompd_get_thread_in_parallel(NULL, 1, &thread_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_thread_handle_compare. + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(4); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + omptestapi ompd_thread_handle_compare +*/ + +PyObject *test_ompd_thread_handle_compare(PyObject *self, PyObject *args) { + printf("Testing \"ompd_thread_handle_compare\"...\n"); + + PyObject *threadHandlePy1 = PyTuple_GetItem(args, 0); + ompd_thread_handle_t *thread_handle1 = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy1, + "ThreadHandle")); + PyObject *threadHandlePy2 = PyTuple_GetItem(args, 1); + ompd_thread_handle_t *thread_handle2 = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy2, + "ThreadHandle")); + + int cmp_value; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = + ompd_thread_handle_compare(thread_handle1, thread_handle2, &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + if (cmp_value == 0) { + printf("Threads are Equal.\n"); + } else { + // a value less than, equal to, or greater than 0 indicates that the thread + // corresponding to thread_handle_1 is, respectively, less than, equal to, + // or greater than that corresponding to thread_handle_2. + if (cmp_value <= 0) { + printf("Thread 1 is lesser than thread 2, cmp_val = %d\n", cmp_value); + printf("Test: Changing the order.\n"); + rc = ompd_thread_handle_compare(thread_handle2, thread_handle1, + &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } + if (cmp_value >= 0) + printf("Success now cmp_value is greater, %d.\n", cmp_value); + else + printf("Failed.\n"); + } else { + printf("Thread 1 is greater than thread 2.\n"); + printf("Test: Changing the order.\n"); + rc = ompd_thread_handle_compare(thread_handle2, thread_handle1, + &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } + if (cmp_value <= 0) + printf("Success now cmp_value is lesser, %d.\n", cmp_value); + else + printf("Failed.\n"); + } + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL cmp_value.\n"); + rc = ompd_thread_handle_compare(thread_handle2, thread_handle1, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "thread_handle.\n"); + rc = ompd_thread_handle_compare(NULL, thread_handle1, &cmp_value); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + } + + return Py_None; +} + +/* + Test API: ompd_get_thread_id. + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + omptestapi ompd_get_thread_id +*/ + +PyObject *test_ompd_get_thread_id(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_thread_id\"...\n"); + + PyObject *threadHandlePy = PyTuple_GetItem(args, 0); + ompd_thread_handle_t *thread_handle = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy, + "ThreadHandle")); + + uint64_t threadID; + ompd_size_t sizeof_thread_id = sizeof(threadID); + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_thread_id(thread_handle, 0 /*OMPD_THREAD_ID_PTHREAD*/, + sizeof_thread_id, &threadID); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success. Thread id = %ld\n", threadID); + + // ompd_rc_bad_input: if a different value in sizeof_thread_id is expected for + // a thread kind of kind + printf("Test: Wrong sizeof_thread_id.\n"); + rc = ompd_get_thread_id(thread_handle, 0 /*OMPD_THREAD_ID_PTHREAD*/, + sizeof_thread_id - 1, &threadID); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // ompd_rc_unsupported: if the kind of thread is not supported + printf("Test: Unsupported thread kind.\n"); + // thread kind currently support from 0-3, refer in ompd-types.h + rc = ompd_get_thread_id(thread_handle, 4, sizeof_thread_id - 1, &threadID); + if (rc != ompd_rc_unsupported) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL threadID.\n"); + rc = ompd_get_thread_id(thread_handle, 0 /*OMPD_THREAD_ID_PTHREAD*/, + sizeof_thread_id, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error for NULL thread_handle.\n"); + rc = ompd_get_thread_id(NULL, 0 /*OMPD_THREAD_ID_PTHREAD*/, sizeof_thread_id, + &threadID); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_rel_thread_handle + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + ompd init + b 7 + omptestapi ompd_rel_thread_handle +*/ + +// TODO: This might not be the right way to do,as this handle comes from +// python not generated by ompd API + +PyObject *test_ompd_rel_thread_handle(PyObject *self, PyObject *args) { + printf("Testing Not enabled for \"ompd_rel_thread_handle\"...\n"); + printf("Disabled.\n"); + return Py_None; +} + +/* + Test API: ompd_get_enclosing_parallel_handle. + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. omp_set_num_threads(3); + 9. #pragma omp parallel + 10. { + 11. printf ("Parallel level 2, thread num = %d", + omp_get_thread_num()); + 12. } + 13. } + 14. return 0; + 15. 
} + + GDB Commands: + ompd init + b 11 + ompdtestapi ompd_get_enclosing_parallel_handle + + for "ompd_rc_unavailable": + ompd init + omptestapi ompd_get_enclosing_parallel_handle + (or break at line 4 before this) +*/ + +PyObject *test_ompd_get_enclosing_parallel_handle(PyObject *self, + PyObject *args) { + printf("Testing \"ompd_get_enclosing_parallel_handle\"...\n"); + + PyObject *parallelHandlePy = PyTuple_GetItem(args, 0); + ompd_parallel_handle_t *parallel_handle = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy, + "ParallelHandle")); + ompd_parallel_handle_t *enclosing_parallel_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_enclosing_parallel_handle(parallel_handle, + &enclosing_parallel_handle); + if (rc == ompd_rc_unavailable) { + // ompd_rc_unavailable: if no enclosing parallel region exists. + printf("Success. return code is ompd_rc_unavailable, Not in parallel " + "region\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL " + "enclosing_parallel_handle.\n"); + rc = ompd_get_enclosing_parallel_handle(parallel_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "parallel_handle.\n"); + rc = ompd_get_enclosing_parallel_handle(NULL, &enclosing_parallel_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_parallel_handle_compare. + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. omp_set_num_threads(3); + 9. #pragma omp parallel + 10. { + 11. printf ("Parallel level 2, thread num = %d", + omp_get_thread_num()); + 12. } + 13. } + 14. return 0; + 15. 
} + + GDB Commands: + ompd init + b 11 + ompdtestapi ompd_parallel_handle_compare +*/ + +PyObject *test_ompd_parallel_handle_compare(PyObject *self, PyObject *args) { + printf("Testing \"ompd_parallel_handle_compare\"...\n"); + + PyObject *parallelHandlePy1 = PyTuple_GetItem(args, 0); + ompd_parallel_handle_t *parallel_handle1 = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy1, + "ParallelHandle")); + PyObject *parallelHandlePy2 = PyTuple_GetItem(args, 1); + ompd_parallel_handle_t *parallel_handle2 = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy2, + "ParallelHandle")); + + int cmp_value; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_parallel_handle_compare(parallel_handle1, + parallel_handle2, &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + if (cmp_value == 0) { + printf("Parallel regions are Same.\n"); + } else { + // A value less than, equal to, or greater than 0 indicates that the region + // corresponding to parallel_handle_1 is, respectively, less than, equal to, + // or greater than that corresponding to parallel_handle_2 + if (cmp_value <= 0) { + printf("Parallel handle 1 is lesser than handle 2, cmp_val = %d\n", + cmp_value); + printf("Test: Changing the order.\n"); + rc = ompd_parallel_handle_compare(parallel_handle2, parallel_handle1, + &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } + if (cmp_value >= 0) + printf("Success now cmp_value is greater, %d.\n", cmp_value); + else + printf("Failed.\n"); + } else { + printf("Parallel 1 is greater than handle 2.\n"); + printf("Test: Changing the order.\n"); + rc = ompd_parallel_handle_compare(parallel_handle2, parallel_handle1, + &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } + if (cmp_value <= 0) + printf("Success now cmp_value is lesser, %d.\n", cmp_value); + else + printf("Failed.\n"); + } + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL cmp_value.\n"); + rc = ompd_parallel_handle_compare(parallel_handle2, parallel_handle1, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "thread_handle.\n"); + rc = ompd_parallel_handle_compare(NULL, parallel_handle1, &cmp_value); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + } + + return Py_None; +} + +/* + Test API: ompd_rel_parallel_handle + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + omptestapi ompd_rel_parallel_handle +*/ + +// TODO: Same as thread_rel_handle, might not be a right way to test +// What released should be provided by ompd API, this address is actually from +// python +PyObject *test_ompd_rel_parallel_handle(PyObject *self, PyObject *args) { + printf("Testing NOT enabled for \"ompd_rel_parallel_handle\"...\n"); + printf("Disabled.\n"); + return Py_None; +} + +/* + Test API: ompd_initialize + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + b 4 + ompdtestapi ompd_initialize\ +*/ +PyObject *test_ompd_initialize(PyObject *self, PyObject *noargs) { + printf("Testing \"test_ompd_initialize\"...\n"); + + ompd_word_t version; + ompd_rc_t rc = ompd_get_api_version(&version); + if (rc != ompd_rc_ok) { + printf("Failed in \"ompd_get_api_version\".\n"); + return Py_None; + } + + static ompd_callbacks_t table = { + _alloc, _free, _print, _sizes, _sym_addr, _read, + NULL, _read_string, _endianess, _endianess, _thread_context}; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t (*my_ompd_init)(ompd_word_t version, ompd_callbacks_t *) = + dlsym(ompd_library, "ompd_initialize"); + rc = my_ompd_init(version, &table); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + static ompd_callbacks_t invalid_table = { + NULL, /* _alloc, */ + NULL, /* _free, */ + NULL, /* _print,*/ + NULL, /* _sizes, */ + NULL, /* _sym_addr, */ + NULL, /* _read,*/ + NULL, NULL, /* _read_string, */ + NULL, /* _endianess, */ + NULL, /* _endianess, */ + NULL, /* _thread_context */ + }; + + // ompd_rc_bad_input: if invalid callbacks are provided + printf("Test: Invalid callbacks.\n"); + rc = my_ompd_init(version, &invalid_table); + if (rc != ompd_rc_bad_input) + printf("Warning, with return code = %d\n", rc); + else + printf("Success.\n"); + + // ompd_rc_unsupported: if the requested API version cannot be provided + printf("Test: Wrong API version.\n"); + rc = my_ompd_init(150847, &table); + if (rc != ompd_rc_unsupported) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL table.\n"); + rc = my_ompd_init(version, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or ompd_rc_bad_input for NULL\n"); + rc = my_ompd_init(0, &table); + if (rc != ompd_rc_unsupported && rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_api_version + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + ompd init + b 7 + ompdtestapi ompd_get_version + +*/ + +PyObject *test_ompd_get_api_version(PyObject *self, PyObject *noargs) { + printf("Testing \"ompd_get_api_version\"...\n"); + + ompd_word_t version; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_api_version(&version); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success. API version is %ld\n", version); + + printf( + "Test: Expecting ompd_rc_error or ompd_rc_bad_input for NULL version\n"); + rc = ompd_get_api_version(NULL); + if (rc != ompd_rc_error && rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_version_string + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + ompd init + b 7 + omptestapi ompd_get_version_string + +*/ + +PyObject *test_ompd_get_version_string(PyObject *self, PyObject *noargs) { + printf("Testing \"ompd_get_version_string\"...\n"); + + const char *string; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_version_string(&string); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success. API version is %s\n", string); + + printf( + "Test: Expecting ompd_rc_error or ompd_rc_bad_input for NULL version\n"); + rc = ompd_get_version_string(NULL); + if (rc != ompd_rc_error && rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_finalize + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + ompd init + b 7 + ompdtestapi ompd_finalize + + + b 4 + r + ompdtestapi ompd_finalize +*/ + +PyObject *test_ompd_finalize(PyObject *self, PyObject *noargs) { + printf("Testing \"ompd_finalize\"...\n"); + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_finalize(); + if (rc == ompd_rc_ok) + printf("Ret code: ompd_rc_ok, Success if ompd is initialized.\n"); + // ompd_rc_unsupported: if the OMPD library is not initialized. + else if (rc == ompd_rc_unsupported) + printf( + "Ret code: ompd_rc_unsupported, Success if ompd is NOT initialized.\n"); + else + printf("Failed: Return code is %d.\n", rc); + + return Py_None; +} + +/* + Test API: ompd_process_initialize + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. 
} + + GDB Commands: + +*/ + +PyObject *test_ompd_process_initialize(PyObject *self, PyObject *noargs) { + + printf("Testing \"ompd_process_initialize\"....\n"); + + ompd_address_space_handle_t *addr_handle; + + // ompd_address_space_context_t context = {42}; + + printf("Test: with correct Args.\n"); + ompd_rc_t rc = ompd_process_initialize(&context, &addr_handle); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + printf("Test: With Unsupported library.\n"); + printf("Warning: Have to test manually with 32 and 64 bit combination.\n"); + + // ompd_address_space_context_t invalidcontext = {99}; + printf("Test: with wrong context value.\n"); + rc = ompd_process_initialize(&invalidcontext, &addr_handle); + if ((rc != ompd_rc_bad_input) && (rc != ompd_rc_incompatible) && + (rc != ompd_rc_stale_handle)) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting stale handle or bad_input for NULL addr_handle.\n"); + rc = ompd_process_initialize(&context, NULL); + if ((rc != ompd_rc_bad_input) && (rc != ompd_rc_stale_handle)) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_device_initialize + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + +*/ + +PyObject *test_ompd_device_initialize(PyObject *self, PyObject *noargs) { + printf("Testing Not enabled for \"ompd_device_initialize\".\n"); + printf("Disabled.\n"); + + return Py_None; +} + +/* + Test API: ompd_rel_address_space_handle + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + +*/ +PyObject *test_ompd_rel_address_space_handle(PyObject *self, PyObject *noargs) { + printf("Testing Not enabled for \"ompd_rel_address_space_handle\".\n"); + printf("Disabled.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_omp_version + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + ompd init + b 10 + c + ompdtestapi ompd_get_omp_version +*/ +PyObject *test_ompd_get_omp_version(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_omp_version\" ...\n"); + + PyObject *addrSpaceTup = PyTuple_GetItem(args, 0); + ompd_address_space_handle_t *addr_handle = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup, + "AddressSpace"); + + ompd_word_t omp_version; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_omp_version(addr_handle, &omp_version); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success. API version is %ld\n", omp_version); + + // Random checks with null and invalid args. 
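+  // The checks below follow the convention used throughout this file: pass
+  // NULL for one pointer argument at a time and accept any of the error
+  // codes that the OMPD specification permits for that argument.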
+ /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting stale handle or bad_input for NULL addr_handle.\n"); + rc = ompd_get_omp_version(NULL, &omp_version); + if ((rc != ompd_rc_bad_input) && (rc != ompd_rc_stale_handle)) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or bad_input for NULL omp_version.\n"); + rc = ompd_get_omp_version(addr_handle, NULL); + if (rc != ompd_rc_error && rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_omp_version_string + + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(2); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + ompd init + b 7 + ompdtestapi ompd_get_omp_version_string +*/ +PyObject *test_ompd_get_omp_version_string(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_omp_version_string\" ...\n"); + + PyObject *addrSpaceTup = PyTuple_GetItem(args, 0); + ompd_address_space_handle_t *addr_handle = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup, + "AddressSpace"); + + const char *string; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_omp_version_string(addr_handle, &string); + if (rc != ompd_rc_ok) { + printf("Failed, with return code = %d\n", rc); + return Py_None; + } else + printf("Success. API version is %s\n", string); + + // Random checks with null and invalid args. 
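+  // Only the presence of the version string is verified here; its contents
+  // are runtime-specific and are not checked.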
+ /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting stale handle or bad_input for NULL addr_handle.\n"); + rc = ompd_get_omp_version_string(NULL, &string); + if ((rc != ompd_rc_bad_input) && (rc != ompd_rc_stale_handle)) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or bad_input for NULL omp_version.\n"); + rc = ompd_get_omp_version_string(addr_handle, NULL); + if (rc != ompd_rc_error && rc != ompd_rc_bad_input) + printf("Failed, with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_curr_task_handle + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + ompdtestapi ompd_get_curr_task_handle +*/ + +PyObject *test_ompd_get_curr_task_handle(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_curr_task_handle\"...\n"); + + PyObject *threadHandlePy = PyTuple_GetItem(args, 0); + ompd_thread_handle_t *thread_handle = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy, + "ThreadHandle")); + + ompd_task_handle_t *task_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_curr_task_handle(thread_handle, &task_handle); + if (rc == ompd_rc_unavailable) { + // ompd_rc_unavailable if the thread is not currently executing a task + + printf( + "Success. Return code is ompd_rc_unavailable, Not executing a task.\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc == ompd_rc_stale_handle) { + printf("Return code is stale_handle, may be in non parallel region.\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc != ompd_rc_ok) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL parallel_handle.\n"); + rc = ompd_get_curr_task_handle(thread_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "thread_handle.\n"); + rc = ompd_get_curr_task_handle(NULL, &task_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. 
with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_task_parallel_handle + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + ompdtestapi ompd_get_task_parallel_handle +*/ +PyObject *test_ompd_get_task_parallel_handle(PyObject *self, PyObject *args) { + + printf("Testing \"ompd_get_task_parallel_handle\"...\n"); + + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *task_handle = + PyCapsule_GetPointer(taskHandlePy, "TaskHandle"); + + ompd_parallel_handle_t *task_parallel_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = + ompd_get_task_parallel_handle(task_handle, &task_parallel_handle); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL task_parallel_handle.\n"); + rc = ompd_get_task_parallel_handle(task_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf( + "Test: Expecting ompd_rc_error or stale_handle for NULL task_handle.\n"); + rc = ompd_get_task_parallel_handle(NULL, &task_parallel_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_generating_task_handle + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + c // may or may not be needed + ompdtestapi ompd_get_generating_task_handle +*/ +PyObject *test_ompd_get_generating_task_handle(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_generating_task_handle\"...\n"); + + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *task_handle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle")); + ompd_task_handle_t *generating_task_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = + ompd_get_generating_task_handle(task_handle, &generating_task_handle); + if (rc == ompd_rc_unavailable) { + // ompd_rc_unavailable if no generating task handle exists. + printf("Success. 
Return code is ompd_rc_unavailable\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf( + "Test: Expecting ompd_rc_bad_input for NULL generating_task_handle.\n"); + rc = ompd_get_generating_task_handle(task_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf( + "Test: Expecting ompd_rc_error or stale_handle for NULL task_handle.\n"); + rc = ompd_get_generating_task_handle(NULL, &generating_task_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_scheduling_task_handle + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + ompdtestapi ompd_get_scheduling_task_handle +*/ +PyObject *test_ompd_get_scheduling_task_handle(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_scheduling_task_handle\"...\n"); + + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *task_handle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle")); + ompd_task_handle_t *scheduling_task_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = + ompd_get_scheduling_task_handle(task_handle, &scheduling_task_handle); + if (rc == ompd_rc_unavailable) { + // ompd_rc_unavailable if no generating task handle exists. + printf( + "Success. Return code is ompd_rc_unavailable, No scheduling task.\n"); + printf("No more testing is possible.\n"); + return Py_None; + } else if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf( + "Test: Expecting ompd_rc_bad_input for NULL scheduling_task_handle.\n"); + rc = ompd_get_scheduling_task_handle(task_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf( + "Test: Expecting ompd_rc_error or stale_handle for NULL task_handle.\n"); + rc = ompd_get_scheduling_task_handle(NULL, &scheduling_task_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. 
with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_get_task_in_parallel + + Program: + Program: + 1. #include + 2. #include + 3. int main () { + 4. omp_set_num_threads(4); + 5. #pragma omp parallel + 6. { + 7. printf("Parallel level 1, thread num = %d", + omp_get_thread_num()); + 8. } + 9. return 0; + 10. } + + GDB Commands: + ompd init + b 7 + c + ompdtestapi ompd_get_task_in_parallel +*/ +PyObject *test_ompd_get_task_in_parallel(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_task_in_parallel\"...\n"); + + PyObject *parallelHandlePy = PyTuple_GetItem(args, 0); + ompd_parallel_handle_t *parallel_handle = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy, + "ParallelHandle")); + ompd_task_handle_t *task_handle; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_task_in_parallel( + parallel_handle, 1 /* lesser than team-size-var*/, &task_handle); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // ompd_rc_bad_input if the thread_num argument is greater than or equal to + // the team-size-var ICV or negative + printf("Test: Invalid thread num (199).\n"); + rc = ompd_get_task_in_parallel(parallel_handle, 199, &task_handle); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Invalid thread num (-5).\n"); + rc = ompd_get_task_in_parallel(parallel_handle, -5, &task_handle); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL task_handle.\n"); + rc = ompd_get_task_in_parallel(parallel_handle, 1, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "parallel_handle.\n"); + rc = ompd_get_task_in_parallel(NULL, 1, &task_handle); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. 
with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +/* + Test API: ompd_rel_task_handle + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + ompdtestapi ompd_rel_task_handle +*/ +PyObject *test_ompd_rel_task_handle(PyObject *self, PyObject *noargs) { + printf("Testing Not enabled for \"ompd_rel_task_handle\".\n"); + printf("Disabled.\n"); + + return Py_None; +} + +/* + Test API: ompd_task_handle_compare + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + c + ompdtestapi ompd_task_handle_compare +*/ +PyObject *test_ompd_task_handle_compare(PyObject *self, PyObject *args) { + printf("Testing \"ompd_task_handle_compare\"...\n"); + + PyObject *taskHandlePy1 = PyTuple_GetItem(args, 0); + ompd_task_handle_t *task_handle1 = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy1, "TaskHandle")); + PyObject *taskHandlePy2 = PyTuple_GetItem(args, 1); + ompd_task_handle_t *task_handle2 = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy2, "TaskHandle")); + + int cmp_value; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = + ompd_task_handle_compare(task_handle1, task_handle2, &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + if (cmp_value == 0) { + printf("Task Handles are Same.\n"); + } else { + // a value less than, equal to, or greater than 0 indicates that the task + // that corresponds to task_handle_1 is, respectively, less than, equal to, + // or greater than the task that corresponds to task_handle_2. + if (cmp_value <= 0) { + printf("Task handle 1 is lesser than handle 2, cmp_val = %d\n", + cmp_value); + printf("Test: Changing the order.\n"); + rc = ompd_task_handle_compare(task_handle2, task_handle1, &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } + if (cmp_value >= 0) + printf("Success now cmp_value is greater, %d.\n", cmp_value); + else + printf("Failed.\n"); + } else { + printf("Task 1 is greater than handle 2.\n"); + printf("Test: Changing the order.\n"); + rc = ompd_task_handle_compare(task_handle2, task_handle1, &cmp_value); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } + if (cmp_value <= 0) + printf("Success now cmp_value is lesser, %d.\n", cmp_value); + else + printf("Failed.\n"); + } + + // Random checks with null and invalid args. 
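+    // Note: these NULL-argument checks are nested in the else branch above,
+    // so they run only when the two task handles compare unequal.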
+ /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL cmp_value.\n"); + rc = ompd_task_handle_compare(task_handle2, task_handle1, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error or stale_handle for NULL " + "task_handle.\n"); + rc = ompd_task_handle_compare(NULL, task_handle1, &cmp_value); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + } + + return Py_None; +} + +/* + Test API: ompd_get_task_function + + Program: + 1 #include + 2 #include + 3 int get_fib_num (int num) + 4 { + 5 int t1, t2; + 6 if (num < 2) + 7 return num; + 8 else { + 9 #pragma omp task shared(t1) + 10 t1 = get_fib_num(num-1); + 11 #pragma omp task shared(t2) + 12 t2 = get_fib_num(num-2); + 13 #pragma omp taskwait + 14 return t1+t2; + 15 } + 16 } + 17 + 18 int main () { + 19 int ret = 0; + 20 omp_set_num_threads(2); + 21 #pragma omp parallel + 22 { + 23 ret = get_fib_num(10); + 24 } + 25 printf ("Fib of 10 is %d", ret); + 26 return 0; + 27 } + + GDB Commands: + ompd init + b 10 + c + ompdtestapi ompd_get_task_function +*/ +PyObject *test_ompd_get_task_function(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_task_function\"...\n"); + + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *task_handle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle")); + + ompd_address_t entry_point; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_task_function(task_handle, &entry_point); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success. Entry point is %lx.\n", entry_point.address); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL entry_point.\n"); + rc = ompd_get_task_function(task_handle, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf( + "Test: Expecting ompd_rc_error or stale_handle for NULL task_handle.\n"); + rc = ompd_get_task_function(NULL, &entry_point); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. 
+ else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+/*
+ Test API: ompd_get_task_frame
+
+ Program:
+ 1 #include <stdio.h>
+ 2 #include <omp.h>
+ 3 int get_fib_num (int num)
+ 4 {
+ 5 int t1, t2;
+ 6 if (num < 2)
+ 7 return num;
+ 8 else {
+ 9 #pragma omp task shared(t1)
+ 10 t1 = get_fib_num(num-1);
+ 11 #pragma omp task shared(t2)
+ 12 t2 = get_fib_num(num-2);
+ 13 #pragma omp taskwait
+ 14 return t1+t2;
+ 15 }
+ 16 }
+ 17
+ 18 int main () {
+ 19 int ret = 0;
+ 20 omp_set_num_threads(2);
+ 21 #pragma omp parallel
+ 22 {
+ 23 ret = get_fib_num(10);
+ 24 }
+ 25 printf ("Fib of 10 is %d", ret);
+ 26 return 0;
+ 27 }
+
+ GDB Commands:
+ ompd init
+ b 10
+ c
+ ompdtestapi ompd_get_task_frame
+*/
+PyObject *test_ompd_get_task_frame(PyObject *self, PyObject *args) {
+ printf("Testing \"ompd_get_task_frame\"...\n");
+
+ PyObject *taskHandlePy = PyTuple_GetItem(args, 0);
+ ompd_task_handle_t *task_handle =
+ (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle"));
+
+ ompd_frame_info_t exit_frame;
+ ompd_frame_info_t enter_frame;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_get_task_frame(task_handle, &exit_frame, &enter_frame);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ // Random checks with null and invalid args.
+ /*
+ ompd_rc_stale_handle: is returned when the specified handle is no
+ longer valid;
+ ompd_rc_bad_input: is returned when the input parameters
+ (other than handle) are invalid;
+ ompd_rc_error: is returned when a fatal error occurred;
+ */
+
+ printf("Test: Expecting ompd_rc_bad_input for NULL exit and enter frame.\n");
+ rc = ompd_get_task_frame(task_handle, NULL, NULL);
+ if (rc != ompd_rc_bad_input)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ printf(
+ "Test: Expecting ompd_rc_error or stale handle for NULL task_handle.\n");
+ rc = ompd_get_task_frame(NULL, &exit_frame, &enter_frame);
+ if (rc != ompd_rc_error && rc != ompd_rc_stale_handle)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+/*
+ Test API: ompd_get_state
+
+ Program:
+ 1. #include <stdio.h>
+ 2. #include <omp.h>
+ 3. int main () {
+ 4. omp_set_num_threads(4);
+ 5. #pragma omp parallel
+ 6. {
+ 7. printf("Parallel level 1, thread num = %d",
+ omp_get_thread_num());
+ 8. }
+ 9. return 0;
+ 10. }
+
+ GDB Commands:
+ ompd init
+ b 7
+ c
+ ompdtestapi ompd_get_state
+*/
+PyObject *test_ompd_get_state(PyObject *self, PyObject *args) {
+ printf("Testing \"ompd_get_state\"...\n");
+
+ PyObject *threadHandlePy = PyTuple_GetItem(args, 0);
+ ompd_thread_handle_t *thread_handle =
+ (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy,
+ "ThreadHandle"));
+
+ ompd_word_t state;
+ ompt_wait_id_t wait_id;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_get_state(thread_handle, &state, &wait_id);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ // Random checks with null and invalid args.
+ /*
+ ompd_rc_stale_handle: is returned when the specified handle is no
+ longer valid;
+ ompd_rc_bad_input: is returned when the input parameters
+ (other than handle) are invalid;
+ ompd_rc_error: is returned when a fatal error occurred;
+ */
+
+ printf("Test: Expecting ompd_rc_error or stale handle for NULL "
+ "thread_handle.\n");
+ rc = ompd_get_state(NULL, &state, &wait_id);
+ if (rc != ompd_rc_error && rc != ompd_rc_stale_handle)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+/*
+ Test API: ompd_get_display_control_vars
+
+ Program:
+ 1 #include <stdio.h>
+ 2 #include <omp.h>
+ 3 int get_fib_num (int num)
+ 4 {
+ 5 int t1, t2;
+ 6 if (num < 2)
+ 7 return num;
+ 8 else {
+ 9 #pragma omp task shared(t1)
+ 10 t1 = get_fib_num(num-1);
+ 11 #pragma omp task shared(t2)
+ 12 t2 = get_fib_num(num-2);
+ 13 #pragma omp taskwait
+ 14 return t1+t2;
+ 15 }
+ 16 }
+ 17
+ 18 int main () {
+ 19 int ret = 0;
+ 20 omp_set_num_threads(2);
+ 21 #pragma omp parallel
+ 22 {
+ 23 ret = get_fib_num(10);
+ 24 }
+ 25 printf ("Fib of 10 is %d", ret);
+ 26 return 0;
+ 27 }
+
+ GDB Commands:
+ ompd init
+ b 10
+ c
+ ompdtestapi ompd_get_display_control_vars
+*/
+PyObject *test_ompd_get_display_control_vars(PyObject *self, PyObject *args) {
+ printf("Testing \"ompd_get_display_control_vars\" ...\n");
+
+ PyObject *addrSpaceTup = PyTuple_GetItem(args, 0);
+ ompd_address_space_handle_t *addr_handle =
+ (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup,
+ "AddressSpace");
+
+ const char *const *control_vars;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_get_display_control_vars(addr_handle, &control_vars);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ // Random checks with null and invalid args.
+ /*
+ ompd_rc_stale_handle: is returned when the specified handle is no
+ longer valid;
+ ompd_rc_bad_input: is returned when the input parameters
+ (other than handle) are invalid;
+ ompd_rc_error: is returned when a fatal error occurred;
+ */
+
+ printf("Test: Expecting stale handle or bad_input for NULL addr_handle.\n");
+ rc = ompd_get_display_control_vars(NULL, &control_vars);
+ if ((rc != ompd_rc_bad_input) && (rc != ompd_rc_stale_handle))
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ printf("Test: Expecting ompd_rc_error or bad_input for NULL control_vars.\n");
+ rc = ompd_get_display_control_vars(addr_handle, NULL);
+ if (rc != ompd_rc_error && rc != ompd_rc_bad_input)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+/*
+ Test API: ompd_rel_display_control_vars
+
+ Program:
+ 1 #include <stdio.h>
+ 2 #include <omp.h>
+ 3 int get_fib_num (int num)
+ 4 {
+ 5 int t1, t2;
+ 6 if (num < 2)
+ 7 return num;
+ 8 else {
+ 9 #pragma omp task shared(t1)
+ 10 t1 = get_fib_num(num-1);
+ 11 #pragma omp task shared(t2)
+ 12 t2 = get_fib_num(num-2);
+ 13 #pragma omp taskwait
+ 14 return t1+t2;
+ 15 }
+ 16 }
+ 17
+ 18 int main () {
+ 19 int ret = 0;
+ 20 omp_set_num_threads(2);
+ 21 #pragma omp parallel
+ 22 {
+ 23 ret = get_fib_num(10);
+ 24 }
+ 25 printf ("Fib of 10 is %d", ret);
+ 26 return 0;
+ 27 }
+
+ GDB Commands:
+ ompd init
+ b 10
+ c
+ ompdtestapi ompd_rel_display_control_vars
+*/
+PyObject *test_ompd_rel_display_control_vars(PyObject *self, PyObject *noargs) {
+ printf("Testing Not enabled for \"ompd_rel_display_control_vars\".\n");
+ printf("Disabled.\n");
+
+ return Py_None;
+}
+
+/*
+ Test API: ompd_enumerate_icvs
+
+ Program:
+ 1. #include <stdio.h>
+ 2. #include <omp.h>
+ 3. int main () {
+ 4. omp_set_num_threads(2);
+ 5. #pragma omp parallel
+ 6. {
+ 7. printf("Parallel level 1, thread num = %d",
+ omp_get_thread_num());
+ 8. }
+ 9. return 0;
+ 10. }
+
+ GDB Commands:
+ ompd init
+ b 7
+ c
+ ompdtestapi ompd_enumerate_icvs
+*/
+
+PyObject *test_ompd_enumerate_icvs(PyObject *self, PyObject *args) {
+ printf("Testing \"ompd_enumerate_icvs\"...\n");
+
+ PyObject *addrSpaceTup = PyTuple_GetItem(args, 0);
+ ompd_address_space_handle_t *addr_handle =
+ (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup,
+ "AddressSpace");
+
+ ompd_icv_id_t current = 0; // To begin enumerating the ICVs, a tool should
+ // pass ompd_icv_undefined as the value of current
+ ompd_icv_id_t next_id;
+ const char *next_icv_name;
+ ompd_scope_t next_scope;
+ int more;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_enumerate_icvs(addr_handle, current, &next_id,
+ &next_icv_name, &next_scope, &more);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ // ompd_rc_bad_input if an unknown value is provided in current
+ printf("Test: Unknown current value.\n");
+ rc = ompd_enumerate_icvs(
+ addr_handle,
+ 99 /* unknown current value: greater than enum "ompd_icv" */,
+ &next_id, &next_icv_name, &next_scope, &more);
+ if (rc != ompd_rc_bad_input)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ // Random checks with null and invalid args.
+ /*
+ ompd_rc_stale_handle: is returned when the specified handle is no
+ longer valid;
+ ompd_rc_bad_input: is returned when the input parameters
+ (other than handle) are invalid;
+ ompd_rc_error: is returned when a fatal error occurred;
+ */
+
+ printf(
+ "Test: Expecting ompd_rc_bad_input for NULL next_id and next_icv_name\n");
+ rc =
+ ompd_enumerate_icvs(addr_handle, current, NULL, NULL, &next_scope, &more);
+ if (rc != ompd_rc_bad_input)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ printf(
+ "Test: Expecting ompd_rc_error or stale_handle for NULL addr_handle.\n");
+ rc = ompd_enumerate_icvs(NULL, current, &next_id, &next_icv_name, &next_scope,
+ &more);
+ if (rc != ompd_rc_error && rc != ompd_rc_stale_handle)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+/*
+ Test API: ompd_get_icv_from_scope
+
+ Program:
+ 1 #include <stdio.h>
+ 2 #include <omp.h>
+ 3 int get_fib_num (int num)
+ 4 {
+ 5 int t1, t2;
+ 6 if (num < 2)
+ 7 return num;
+ 8 else {
+ 9 #pragma omp task shared(t1)
+ 10 t1 = get_fib_num(num-1);
+ 11 #pragma omp task shared(t2)
+ 12 t2 = get_fib_num(num-2);
+ 13 #pragma omp taskwait
+ 14 return t1+t2;
+ 15 }
+ 16 }
+ 17
+ 18 int main () {
+ 19 int ret = 0;
+ 20 omp_set_num_threads(2);
+ 21 #pragma omp parallel
+ 22 {
+ 23 ret = get_fib_num(10);
+ 24 }
+ 25 printf ("Fib of 10 is %d", ret);
+ 26 return 0;
+ 27 }
+
+ GDB Commands:
+ ompd init
+ b 10
+ c
+ ompdtestapi ompd_get_icv_from_scope
+*/
+PyObject *test_ompd_get_icv_from_scope_with_addr_handle(PyObject *self,
+ PyObject *args) {
+ printf("Testing \"ompd_get_icv_from_scope with addr_handle\"...\n");
+
+ PyObject *addrSpaceTup = PyTuple_GetItem(args, 0);
+ ompd_address_space_handle_t *addr_handle =
+ (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup,
+ "AddressSpace");
+
+ ompd_word_t icv_value;
+
+ printf("Test: With Correct Arguments.\n");
+ // Cannot import enum ompd_icv from omp-icv.cpp, so the IDs are hardcoded
+ // for now; if the enum changes, these values must be updated as well.
+ ompd_rc_t rc = ompd_get_icv_from_scope(
+ addr_handle, ompd_scope_address_space,
+ 19 /* ompd_icv_num_procs_var: check enum ompd_icv in omp-icv.cpp */,
+ &icv_value);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ // ompd_rc_bad_input if an unknown value is provided in icv_id.
+ printf("Test: bad_input for unknown icv_id.\n");
+ rc = ompd_get_icv_from_scope(addr_handle, ompd_scope_address_space,
+ 99 /*wrong value*/, &icv_value);
+ if (rc != ompd_rc_bad_input)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ // ompd_rc_incompatible if the ICV cannot be represented as an integer;
+ printf("Test: rc_incompatible for ICV that can't be represented as an "
+ "integer.\n");
+ rc = ompd_get_icv_from_scope(addr_handle, ompd_scope_address_space,
+ 12 /*ompd_icv_tool_libraries_var*/, &icv_value);
+ if (rc != ompd_rc_incompatible)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ // Random checks with null and invalid args.
+ /*
+ ompd_rc_stale_handle: is returned when the specified handle is no
+ longer valid;
+ ompd_rc_bad_input: is returned when the input parameters
+ (other than handle) are invalid;
+ ompd_rc_error: is returned when a fatal error occurred;
+ */
+
+ printf("Test: Expecting ompd_rc_bad_input for NULL icv_value.\n");
+ rc = ompd_get_icv_from_scope(addr_handle, ompd_scope_address_space,
+ 19 /*ompd_icv_num_procs_var*/, NULL);
+ if (rc != ompd_rc_bad_input)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ printf("Test: Expecting ompd_rc_error for NULL handle.\n");
+ rc = ompd_get_icv_from_scope(NULL, ompd_scope_address_space,
+ 19 /*ompd_icv_num_procs_var*/, &icv_value);
+ if (rc != ompd_rc_error && rc != ompd_rc_stale_handle)
+ printf("Failed, with return code = %d\n", rc);
+ else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+PyObject *test_ompd_get_icv_from_scope_with_thread_handle(PyObject *self,
+ PyObject *args) {
+ printf("Testing \"ompd_get_icv_from_scope with thread_handle\"...\n");
+
+ PyObject *threadHandlePy = PyTuple_GetItem(args, 0);
+ ompd_thread_handle_t *thread_handle =
+ (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy,
+ "ThreadHandle"));
+
+ ompd_word_t icv_value;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_get_icv_from_scope(
+ thread_handle, ompd_scope_thread,
+ 22 /* ompd_icv_thread_num_var: check enum ompd_icv in omp-icv.cpp */,
+ &icv_value);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ printf("Test: with nthreads_var for ompd_rc_incomplete.\n");
+ rc = ompd_get_icv_from_scope(thread_handle, ompd_scope_thread,
+ 7 /*ompd_icv_nthreads_var*/, &icv_value);
+ if (rc != ompd_rc_incomplete) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+PyObject *test_ompd_get_icv_from_scope_with_parallel_handle(PyObject *self,
+ PyObject *args) {
+ printf("Testing \"ompd_get_icv_from_scope with parallel_handle\"...\n");
+
+ PyObject *parallelHandlePy = PyTuple_GetItem(args, 0);
+ ompd_parallel_handle_t *parallel_handle =
+ (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy,
+ "ParallelHandle"));
+
+ ompd_word_t icv_value;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_get_icv_from_scope(
+ parallel_handle, ompd_scope_parallel,
+ 15 /* ompd_icv_active_levels_var: check enum ompd_icv in omp-icv.cpp */,
+ &icv_value);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ return Py_None;
+}
+
+PyObject *test_ompd_get_icv_from_scope_with_task_handle(PyObject *self,
+ PyObject *args) {
+ printf("Testing \"ompd_get_icv_from_scope with task_handle\"...\n");
+
+ PyObject *taskHandlePy = PyTuple_GetItem(args, 0);
+ ompd_task_handle_t *task_handle =
+ (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle"));
+
+ ompd_word_t icv_value;
+
+ printf("Test: With Correct Arguments.\n");
+ ompd_rc_t rc = ompd_get_icv_from_scope(
+ task_handle, ompd_scope_task,
+ 16 /* ompd_icv_thread_limit_var: check enum ompd_icv in omp-icv.cpp */,
+ &icv_value);
+ if (rc != ompd_rc_ok) {
+ printf("Failed, with return code = %d\n", rc);
+ return Py_None;
+ } else
+ printf("Success.\n");
+
+ return Py_None;
+}
+/*
+ Test API: ompd_get_icv_string_from_scope
+
+ Program:
+ 1. #include <stdio.h>
+ 2. #include <omp.h>
+ 3. int main () {
+ 4. omp_set_num_threads(4);
+ 5. #pragma omp parallel
+ 6. {
+ 7. printf("Parallel level 1, thread num = %d",
+ omp_get_thread_num());
+ 8. }
+ 9. return 0;
+ 10. 
} + + GDB Commands: + ompd init + b 7 + c + ompdtestapi ompd_get_icv_string_from_scope +*/ +PyObject *test_ompd_get_icv_string_from_scope(PyObject *self, PyObject *args) { + printf("Testing \"ompd_get_icv_string_from_scope\"...\n"); + + PyObject *addrSpaceTup = PyTuple_GetItem(args, 0); + ompd_address_space_handle_t *addr_handle = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup, + "AddressSpace"); + + const char *icv_string; + + printf("Test: With Correct Arguments.\n"); + ompd_rc_t rc = ompd_get_icv_string_from_scope( + addr_handle, ompd_scope_address_space, + 12 /*ompd_icv_tool_libraries_var: check enum ompd_icv in omp-icv.cpp */, + &icv_string); + if (rc != ompd_rc_ok) { + printf("Failed. with return code = %d\n", rc); + return Py_None; + } else + printf("Success.\n"); + + // ompd_rc_bad_input if an unknown value is provided in icv_id. + printf("Test: bad_input for unknown icv_id.\n"); + rc = ompd_get_icv_string_from_scope(addr_handle, ompd_scope_address_space, + 99 /*wrong value*/, &icv_string); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + // Random checks with null and invalid args. + /* + ompd_rc_stale_handle: is returned when the specified handle is no + longer valid; + ompd_rc_bad_input: is returned when the input parameters + (other than handle) are invalid; + ompd_rc_error: is returned when a fatal error occurred; + */ + + printf("Test: Expecting ompd_rc_bad_input for NULL icv_string.\n"); + rc = ompd_get_icv_string_from_scope(addr_handle, ompd_scope_address_space, + 12 /*ompd_icv_tool_libraries_var*/, NULL); + if (rc != ompd_rc_bad_input) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + printf("Test: Expecting ompd_rc_error for NULL handle.\n"); + rc = ompd_get_icv_string_from_scope(NULL, ompd_scope_address_space, + 12 /*ompd_icv_tool_libraries_var*/, + &icv_string); + if (rc != ompd_rc_error && rc != ompd_rc_stale_handle) + printf("Failed. with return code = %d\n", rc); + else + printf("Success.\n"); + + return Py_None; +} + +PyObject *test_ompd_get_tool_data(PyObject *self, PyObject *args) { + printf("Disabled: Testing Not enabled for \"ompd_get_tool_data\".\n"); + + return Py_None; +} +PyObject *test_ompd_enumerate_states(PyObject *self, PyObject *args) { + printf("Disabled: Testing Not enabled for \"ompd_enumerate_states\".\n"); + + return Py_None; +} diff --git a/openmp/libompd/gdb-plugin/ompdModule.c b/openmp/libompd/gdb-plugin/ompdModule.c new file mode 100644 --- /dev/null +++ b/openmp/libompd/gdb-plugin/ompdModule.c @@ -0,0 +1,1652 @@ +/* + * ompdModule.c + */ + +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <Python.h>
+#include <omp-tools.h>
+// #include <ompd.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void *ompd_library;
+
+#define OMPD_WEAK_ATTR __attribute__((weak))
+
+struct _ompd_aspace_cont {
+ int id;
+};
+struct _ompd_thread_cont {
+ int id;
+};
+ompd_address_space_context_t acontext = {42};
+
+PyObject *pModule;
+
+ompd_rc_t _print(const char *str, int category);
+
+// NOTE: implement functions to check parameters of OMPD API functions for
+// correctness
+OMPD_WEAK_ATTR ompd_rc_t ompd_get_api_version(ompd_word_t *addr) {
+ static ompd_rc_t (*my_get_api_version)(ompd_word_t *) = NULL;
+ if (!my_get_api_version) {
+ my_get_api_version = dlsym(ompd_library, "ompd_get_api_version");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_get_api_version(addr);
+}
+
+OMPD_WEAK_ATTR ompd_rc_t ompd_get_version_string(const char **string) {
+ static ompd_rc_t (*my_get_version_string)(const char **) = NULL;
+ if (!my_get_version_string) {
+ my_get_version_string = dlsym(ompd_library, "ompd_get_version_string");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_get_version_string(string);
+}
+
+OMPD_WEAK_ATTR ompd_rc_t ompd_finalize(void) {
+ static ompd_rc_t (*my_ompd_finalize)(void) = NULL;
+ if (!my_ompd_finalize) {
+ my_ompd_finalize = dlsym(ompd_library, "ompd_finalize");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_ompd_finalize();
+}
+
+OMPD_WEAK_ATTR ompd_rc_t
+ompd_process_initialize(ompd_address_space_context_t *context,
+ ompd_address_space_handle_t **handle) {
+ static ompd_rc_t (*my_ompd_process_initialize)(
+ ompd_address_space_context_t *, ompd_address_space_handle_t **) = NULL;
+ if (!my_ompd_process_initialize) {
+ my_ompd_process_initialize = dlsym(ompd_library, "ompd_process_initialize");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_ompd_process_initialize(context, handle);
+}
+
+OMPD_WEAK_ATTR ompd_rc_t ompd_get_omp_version(
+ ompd_address_space_handle_t *address_space, ompd_word_t *omp_version) {
+ static ompd_rc_t (*my_ompd_get_omp_version)(ompd_address_space_handle_t *,
+ ompd_word_t *) = NULL;
+ if (!my_ompd_get_omp_version) {
+ my_ompd_get_omp_version = dlsym(ompd_library, "ompd_get_omp_version");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_ompd_get_omp_version(address_space, omp_version);
+}
+
+OMPD_WEAK_ATTR ompd_rc_t ompd_get_omp_version_string(
+ ompd_address_space_handle_t *address_space, const char **string) {
+ static ompd_rc_t (*my_ompd_get_omp_version_string)(
+ ompd_address_space_handle_t *, const char **) = NULL;
+ if (!my_ompd_get_omp_version_string) {
+ my_ompd_get_omp_version_string =
+ dlsym(ompd_library, "ompd_get_omp_version_string");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_ompd_get_omp_version_string(address_space, string);
+}
+
+OMPD_WEAK_ATTR ompd_rc_t ompd_get_thread_handle(
+ ompd_address_space_handle_t *handle, ompd_thread_id_t kind,
+ ompd_size_t tidSize, const void *tid, ompd_thread_handle_t **threadHandle) {
+ static ompd_rc_t (*my_get_thread_handle)(
+ ompd_address_space_handle_t *, ompd_thread_id_t, ompd_size_t,
+ const void *, ompd_thread_handle_t **) = NULL;
+ if (!my_get_thread_handle) {
+ my_get_thread_handle = dlsym(ompd_library, "ompd_get_thread_handle");
+ if (dlerror()) {
+ return ompd_rc_error;
+ }
+ }
+ return my_get_thread_handle(handle, kind, tidSize, tid, threadHandle);
+}
+
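+/*
+ * NOTE: every wrapper in this file follows the same lazy dlsym-binding
+ * pattern -- resolve the symbol from the already-dlopen()ed libompd.so once,
+ * cache the function pointer in a function-local static, and report
+ * ompd_rc_error if the lookup fails. The sketch below (guarded out with
+ * "#if 0") shows the shape of the pattern; the entry point
+ * "ompd_example_call" is hypothetical and exists only for illustration.
+ */
+#if 0 /* sketch only; not compiled */
+OMPD_WEAK_ATTR ompd_rc_t ompd_example_call(ompd_word_t *out) {
+ /* Bind lazily so the plugin can be loaded before libompd.so is found. */
+ static ompd_rc_t (*my_example_call)(ompd_word_t *) = NULL;
+ if (!my_example_call) {
+ my_example_call = dlsym(ompd_library, "ompd_example_call");
+ if (dlerror()) /* symbol missing or library not loaded */
+ return ompd_rc_error;
+ }
+ return my_example_call(out);
+}
+#endif
+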
+OMPD_WEAK_ATTR ompd_rc_t ompd_get_thread_in_parallel( + ompd_parallel_handle_t *parallelHandle, int threadNum, + ompd_thread_handle_t **threadHandle) { + static ompd_rc_t (*my_get_thread_in_parallel)(ompd_parallel_handle_t *, int, + ompd_thread_handle_t **) = NULL; + if (!my_get_thread_in_parallel) { + my_get_thread_in_parallel = + dlsym(ompd_library, "ompd_get_thread_in_parallel"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_thread_in_parallel(parallelHandle, threadNum, threadHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_thread_handle_compare( + ompd_thread_handle_t *thread_handle1, ompd_thread_handle_t *thread_handle2, + int *cmp_value) { + static ompd_rc_t (*my_thread_handle_compare)( + ompd_thread_handle_t *, ompd_thread_handle_t *, int *) = NULL; + if (!my_thread_handle_compare) { + my_thread_handle_compare = + dlsym(ompd_library, "ompd_thread_handle_compare"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_thread_handle_compare(thread_handle1, thread_handle2, cmp_value); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_get_curr_parallel_handle(ompd_thread_handle_t *threadHandle, + ompd_parallel_handle_t **parallelHandle) { + static ompd_rc_t (*my_get_current_parallel_handle)( + ompd_thread_handle_t *, ompd_parallel_handle_t **) = NULL; + if (!my_get_current_parallel_handle) { + my_get_current_parallel_handle = + dlsym(ompd_library, "ompd_get_curr_parallel_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_current_parallel_handle(threadHandle, parallelHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_parallel_handle_compare( + ompd_parallel_handle_t *parallel_handle_1, + ompd_parallel_handle_t *parallel_handle_2, int *cmp_value) { + static ompd_rc_t (*my_parallel_handle_compare)( + ompd_parallel_handle_t *, ompd_parallel_handle_t *, int *) = NULL; + if (!my_parallel_handle_compare) { + my_parallel_handle_compare = + dlsym(ompd_library, "ompd_parallel_handle_compare"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_parallel_handle_compare(parallel_handle_1, parallel_handle_2, + cmp_value); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_get_enclosing_parallel_handle(ompd_parallel_handle_t *parallelHandle, + ompd_parallel_handle_t **enclosing) { + static ompd_rc_t (*my_get_enclosing_parallel_handle)( + ompd_parallel_handle_t *, ompd_parallel_handle_t **) = NULL; + if (!my_get_enclosing_parallel_handle) { + my_get_enclosing_parallel_handle = + dlsym(ompd_library, "ompd_get_enclosing_parallel_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_enclosing_parallel_handle(parallelHandle, enclosing); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_get_task_parallel_handle(ompd_task_handle_t *taskHandle, + ompd_parallel_handle_t **taskParallelHandle) { + static ompd_rc_t (*my_get_task_parallel_handle)( + ompd_task_handle_t *, ompd_parallel_handle_t **) = NULL; + if (!my_get_task_parallel_handle) { + my_get_task_parallel_handle = + dlsym(ompd_library, "ompd_get_task_parallel_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_task_parallel_handle(taskHandle, taskParallelHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_curr_task_handle( + ompd_thread_handle_t *threadHandle, ompd_task_handle_t **taskHandle) { + static ompd_rc_t (*my_get_current_task_handle)(ompd_thread_handle_t *, + ompd_task_handle_t **) = NULL; + if (!my_get_current_task_handle) { + my_get_current_task_handle = + dlsym(ompd_library, "ompd_get_curr_task_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return 
my_get_current_task_handle(threadHandle, taskHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_generating_task_handle( + ompd_task_handle_t *taskHandle, ompd_task_handle_t **generating) { + static ompd_rc_t (*my_get_generating_task_handle)( + ompd_task_handle_t *, ompd_task_handle_t **) = NULL; + if (!my_get_generating_task_handle) { + my_get_generating_task_handle = + dlsym(ompd_library, "ompd_get_generating_task_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_generating_task_handle(taskHandle, generating); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_scheduling_task_handle( + ompd_task_handle_t *taskHandle, ompd_task_handle_t **scheduling) { + static ompd_rc_t (*my_get_scheduling_task_handle)( + ompd_task_handle_t *, ompd_task_handle_t **) = NULL; + if (!my_get_scheduling_task_handle) { + my_get_scheduling_task_handle = + dlsym(ompd_library, "ompd_get_scheduling_task_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_scheduling_task_handle(taskHandle, scheduling); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_get_task_in_parallel(ompd_parallel_handle_t *parallelHandle, int threadNum, + ompd_task_handle_t **taskHandle) { + static ompd_rc_t (*my_get_task_in_parallel)(ompd_parallel_handle_t *, int, + ompd_task_handle_t **) = NULL; + if (!my_get_task_in_parallel) { + my_get_task_in_parallel = dlsym(ompd_library, "ompd_get_task_in_parallel"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_task_in_parallel(parallelHandle, threadNum, taskHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_task_frame(ompd_task_handle_t *taskHandle, + ompd_frame_info_t *exitFrame, + ompd_frame_info_t *enterFrame) { + static ompd_rc_t (*my_get_task_frame)( + ompd_task_handle_t *, ompd_frame_info_t *, ompd_frame_info_t *) = NULL; + if (!my_get_task_frame) { + my_get_task_frame = dlsym(ompd_library, "ompd_get_task_frame"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_task_frame(taskHandle, exitFrame, enterFrame); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_icv_from_scope(void *handle, + ompd_scope_t scope, + ompd_icv_id_t icvId, + ompd_word_t *icvValue) { + static ompd_rc_t (*my_get_icv_from_scope)(void *, ompd_scope_t, ompd_icv_id_t, + ompd_word_t *) = NULL; + if (!my_get_icv_from_scope) { + my_get_icv_from_scope = dlsym(ompd_library, "ompd_get_icv_from_scope"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_icv_from_scope(handle, scope, icvId, icvValue); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_enumerate_icvs(ompd_address_space_handle_t *handle, ompd_icv_id_t current, + ompd_icv_id_t *next, const char **nextIcvName, + ompd_scope_t *nextScope, int *more) { + static ompd_rc_t (*my_enumerate_icvs)( + ompd_address_space_handle_t *, ompd_icv_id_t, ompd_icv_id_t *, + const char **, ompd_scope_t *, int *) = NULL; + if (!my_enumerate_icvs) { + my_enumerate_icvs = dlsym(ompd_library, "ompd_enumerate_icvs"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_enumerate_icvs(handle, current, next, nextIcvName, nextScope, more); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_enumerate_states(ompd_address_space_handle_t *addrSpaceHandle, + ompd_word_t currentState, ompd_word_t *nextState, + const char **nextStateName, ompd_word_t *moreEnums) { + static ompd_rc_t (*my_enumerate_states)(ompd_address_space_handle_t *, + ompd_word_t, ompd_word_t *, + const char **, ompd_word_t *) = NULL; + if (!my_enumerate_states) { + my_enumerate_states = dlsym(ompd_library, "ompd_enumerate_states"); + if (dlerror()) { + return ompd_rc_error; + } + } + 
return my_enumerate_states(addrSpaceHandle, currentState, nextState, + nextStateName, moreEnums); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_state(ompd_thread_handle_t *threadHandle, + ompd_word_t *state, + ompd_wait_id_t *waitId) { + static ompd_rc_t (*my_get_state)(ompd_thread_handle_t *, ompd_word_t *, + ompd_wait_id_t *) = NULL; + if (!my_get_state) { + my_get_state = dlsym(ompd_library, "ompd_get_state"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_state(threadHandle, state, waitId); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_task_function(ompd_task_handle_t *taskHandle, + ompd_address_t *entryPoint) { + static ompd_rc_t (*my_get_task_function)(ompd_task_handle_t *, + ompd_address_t *) = NULL; + if (!my_get_task_function) { + my_get_task_function = dlsym(ompd_library, "ompd_get_task_function"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_task_function(taskHandle, entryPoint); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_thread_id(ompd_thread_handle_t *threadHandle, + ompd_thread_id_t kind, + ompd_size_t tidSize, void *tid) { + static ompd_rc_t (*my_get_thread_id)(ompd_thread_handle_t *, ompd_thread_id_t, + ompd_size_t, void *) = NULL; + if (!my_get_thread_id) { + my_get_thread_id = dlsym(ompd_library, "ompd_get_thread_id"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_thread_id(threadHandle, kind, tidSize, tid); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_get_tool_data(void *handle, ompd_scope_t scope, + ompd_word_t *value, + ompd_address_t *ptr) { + static ompd_rc_t (*my_get_tool_data)(void *, ompd_scope_t, ompd_word_t *, + ompd_address_t *) = NULL; + if (!my_get_tool_data) { + my_get_tool_data = dlsym(ompd_library, "ompd_get_tool_data"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_tool_data(handle, scope, value, ptr); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_get_icv_string_from_scope(void *handle, ompd_scope_t scope, + ompd_icv_id_t icvId, const char **icvString) { + static ompd_rc_t (*my_get_icv_string_from_scope)( + void *, ompd_scope_t, ompd_icv_id_t, const char **) = NULL; + if (!my_get_icv_string_from_scope) { + my_get_icv_string_from_scope = + dlsym(ompd_library, "ompd_get_icv_string_from_scope"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_get_icv_string_from_scope(handle, scope, icvId, icvString); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_rel_thread_handle(ompd_thread_handle_t *threadHandle) { + static ompd_rc_t (*my_release_thread_handle)(ompd_thread_handle_t *) = NULL; + if (!my_release_thread_handle) { + my_release_thread_handle = dlsym(ompd_library, "ompd_rel_thread_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_release_thread_handle(threadHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_rel_parallel_handle(ompd_parallel_handle_t *parallelHandle) { + static ompd_rc_t (*my_release_parallel_handle)(ompd_parallel_handle_t *) = + NULL; + if (!my_release_parallel_handle) { + my_release_parallel_handle = + dlsym(ompd_library, "ompd_rel_parallel_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_release_parallel_handle(parallelHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t ompd_rel_task_handle(ompd_task_handle_t *taskHandle) { + static ompd_rc_t (*my_release_task_handle)(ompd_task_handle_t *) = NULL; + if (!my_release_task_handle) { + my_release_task_handle = dlsym(ompd_library, "ompd_rel_task_handle"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_release_task_handle(taskHandle); +} + +OMPD_WEAK_ATTR ompd_rc_t 
+ompd_task_handle_compare(ompd_task_handle_t *task_handle_1, + ompd_task_handle_t *task_handle_2, int *cmp_value) { + static ompd_rc_t (*my_task_handle_compare)( + ompd_task_handle_t *, ompd_task_handle_t *, int *) = NULL; + if (!my_task_handle_compare) { + my_task_handle_compare = dlsym(ompd_library, "ompd_task_handle_compare"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_task_handle_compare(task_handle_1, task_handle_2, cmp_value); +} + +OMPD_WEAK_ATTR ompd_rc_t +ompd_get_display_control_vars(ompd_address_space_handle_t *address_space_handle, + const char *const **control_vars) { + static ompd_rc_t (*my_ompd_get_display_control_vars)( + ompd_address_space_handle_t *, const char *const **) = NULL; + if (!my_ompd_get_display_control_vars) { + my_ompd_get_display_control_vars = + dlsym(ompd_library, "ompd_get_display_control_vars"); + if (dlerror()) { + return ompd_rc_error; + } + } + return my_ompd_get_display_control_vars(address_space_handle, control_vars); +} + +/** + * Loads the OMPD library (libompd.so). Returns an integer with the version if + * the OMPD library could be loaded successfully. Error codes: -1: argument + * could not be converted to string -2: error when calling dlopen -3: error when + * fetching version of OMPD API else: see ompd return codes + */ +static PyObject *ompd_open(PyObject *self, PyObject *args) { + const char *name, *dlerr; + dlerror(); + if (!PyArg_ParseTuple(args, "s", &name)) { + return Py_BuildValue("i", -1); + } + ompd_library = dlopen(name, RTLD_LAZY); + if ((dlerr = dlerror())) { + return Py_BuildValue("i", -2); + } + if (dlerror()) { + return Py_BuildValue("i", -3); + } + ompd_word_t version; + ompd_rc_t rc = ompd_get_api_version(&version); + if (rc != ompd_rc_ok) + return Py_BuildValue("l", -10 - rc); + + int returnValue = version; + return Py_BuildValue("i", returnValue); +} + +/** + * Have the debugger print a string. + */ +ompd_rc_t _print(const char *str, int category) { + PyObject *pFunc = PyObject_GetAttrString(pModule, "_print"); + if (pFunc && PyCallable_Check(pFunc)) { + PyObject *pArgs = PyTuple_New(1); + PyTuple_SetItem(pArgs, 0, Py_BuildValue("s", str)); + PyObject_CallObject(pFunc, pArgs); + Py_XDECREF(pArgs); + } + Py_XDECREF(pFunc); + return ompd_rc_ok; +} + +void _printf(char *format, ...) { + va_list args; + va_start(args, format); + char output[1024]; + vsnprintf(output, 1024, format, args); + va_end(args); + _print(output, 0); +} + +/** + * Capsule destructors for thread, parallel and task handles. + */ +static void call_ompd_rel_thread_handle_temp(PyObject *capsule) { + ompd_thread_handle_t *threadHandle = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(capsule, "ThreadHandle")); + + ompd_rc_t retVal = ompd_rel_thread_handle(threadHandle); + if (retVal != ompd_rc_ok) { + _printf( + "An error occurred when calling ompd_rel_thread_handle! Error code: %d", + retVal); + } +} + +static void destroyThreadCapsule(PyObject *capsule) { + call_ompd_rel_thread_handle_temp(capsule); +} +static void (*my_thread_capsule_destructor)(PyObject *) = destroyThreadCapsule; + +static void call_ompd_rel_parallel_handle_temp(PyObject *capsule) { + ompd_parallel_handle_t *parallelHandle = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(capsule, + "ParallelHandle")); + + ompd_rc_t retVal = ompd_rel_parallel_handle(parallelHandle); + if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_rel_parallel_handle! 
Error " + "code: %d", + retVal); + } +} + +static void destroyParallelCapsule(PyObject *capsule) { + call_ompd_rel_parallel_handle_temp(capsule); +} +static void (*my_parallel_capsule_destructor)(PyObject *) = + destroyParallelCapsule; + +static void call_ompd_rel_task_handle_temp(PyObject *capsule) { + ompd_task_handle_t *taskHandle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(capsule, "TaskHandle")); + + ompd_rc_t retVal = ompd_rel_task_handle(taskHandle); + if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_rel_task_handle!\n"); + } +} + +static void destroyTaskCapsule(PyObject *capsule) { + call_ompd_rel_task_handle_temp(capsule); +} +static void (*my_task_capsule_destructor)(PyObject *) = destroyTaskCapsule; + +/** + * Release thread handle. Called inside destructor for Python thread_handle + * object. + */ +static PyObject *call_ompd_rel_thread_handle(PyObject *self, PyObject *args) { + PyObject *threadHandlePy = PyTuple_GetItem(args, 0); + ompd_thread_handle_t *threadHandle = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy, + "ThreadHandle")); + + ompd_rc_t retVal = ompd_rel_thread_handle(threadHandle); + if (retVal != ompd_rc_ok) { + _printf( + "An error occurred when calling ompd_rel_thread_handle! Error code: %d", + retVal); + } + return Py_BuildValue("l", retVal); +} + +/** + * Allocate memory in the debugger's address space. + */ +ompd_rc_t _alloc(ompd_size_t bytes, void **ptr) { + if (ptr == NULL) { + return ompd_rc_bad_input; + } + *ptr = malloc(bytes); + return ompd_rc_ok; +} + +/** + * Free memory in the debugger's address space. + */ +ompd_rc_t _free(void *ptr) { + free(ptr); + return ompd_rc_ok; +} + +/** + * Look up the sizes of primitive types in the target. + */ +ompd_rc_t _sizes(ompd_address_space_context_t *_acontext, /* IN */ + ompd_device_type_sizes_t *sizes) /* OUT */ +{ + if (acontext.id != _acontext->id) + return ompd_rc_stale_handle; + ompd_device_type_sizes_t mysizes = { + (uint8_t)sizeof(char), (uint8_t)sizeof(short), + (uint8_t)sizeof(int), (uint8_t)sizeof(long), + (uint8_t)sizeof(long long), (uint8_t)sizeof(void *)}; + *sizes = mysizes; + return ompd_rc_ok; +} + +/** + * Look up the address of a global symbol in the target. + */ +ompd_rc_t _sym_addr(ompd_address_space_context_t *context, /* IN */ + ompd_thread_context_t *tcontext, /* IN */ + const char *symbol_name, /* IN */ + ompd_address_t *symbol_addr, /* OUT */ + const char *file_name) /* IN */ +{ + int thread_id = -1; + PyObject *symbolAddress; + if (tcontext != NULL) { + thread_id = tcontext->id; + } + PyObject *pFunc = PyObject_GetAttrString(pModule, "_sym_addr"); + if (pFunc && PyCallable_Check(pFunc)) { + PyObject *pArgs = PyTuple_New(2); + PyTuple_SetItem(pArgs, 0, Py_BuildValue("i", thread_id)); + PyTuple_SetItem(pArgs, 1, Py_BuildValue("s", symbol_name)); + symbolAddress = PyObject_CallObject(pFunc, pArgs); + if (symbolAddress == NULL) { + PyErr_Print(); + } + symbol_addr->address = PyLong_AsLong(symbolAddress); + Py_XDECREF(pArgs); + Py_XDECREF(symbolAddress); + } + Py_XDECREF(pFunc); + return ompd_rc_ok; +} + +/** + * Read memory from the target. 
+ */
+ompd_rc_t _read(ompd_address_space_context_t *context, /* IN */
+ ompd_thread_context_t *tcontext, /* IN */
+ const ompd_address_t *addr, /* IN */
+ ompd_size_t nbytes, /* IN */
+ void *buffer) /* OUT */
+{
+ uint64_t readMem = (uint64_t)addr->address;
+ PyObject *pFunc = PyObject_GetAttrString(pModule, "_read");
+ if (pFunc && PyCallable_Check(pFunc)) {
+ PyObject *pArgs = PyTuple_New(2);
+ PyTuple_SetItem(pArgs, 0, Py_BuildValue("l", readMem));
+ PyTuple_SetItem(pArgs, 1, Py_BuildValue("l", nbytes));
+ PyObject *retArray = PyObject_CallObject(pFunc, pArgs);
+ Py_XDECREF(pArgs);
+ if (retArray == NULL) {
+ PyErr_Print();
+ }
+ if (!PyByteArray_Check(retArray)) {
+ return ompd_rc_error;
+ }
+ Py_ssize_t retSize = PyByteArray_Size(retArray);
+ const char *strBuf = PyByteArray_AsString(retArray);
+ if ((ompd_size_t)retSize != nbytes) {
+ return ompd_rc_error;
+ }
+ memcpy(buffer, strBuf, nbytes);
+ Py_XDECREF(retArray);
+ }
+ Py_XDECREF(pFunc);
+ return ompd_rc_ok;
+}
+
+/**
+ * Reads a string from the target.
+ */
+ompd_rc_t _read_string(ompd_address_space_context_t *context, /* IN */
+ ompd_thread_context_t *tcontext, /* IN */
+ const ompd_address_t *addr, /* IN */
+ ompd_size_t nbytes, /* IN */
+ void *buffer) /* OUT */
+{
+ ompd_rc_t retVal = ompd_rc_ok;
+ uint64_t readMem = (uint64_t)addr->address;
+ PyObject *pFunc = PyObject_GetAttrString(pModule, "_read_string");
+ PyObject *pArgs = PyTuple_New(1);
+ PyTuple_SetItem(pArgs, 0, Py_BuildValue("l", readMem));
+ PyObject *retString = PyObject_CallObject(pFunc, pArgs);
+ Py_XDECREF(pArgs);
+ if (!PyUnicode_Check(retString)) {
+ return ompd_rc_error;
+ }
+ Py_ssize_t retSize;
+ const char *strbuffer = PyUnicode_AsUTF8AndSize(retString, &retSize);
+ if ((ompd_size_t)retSize + 1 >= nbytes) {
+ retVal = ompd_rc_incomplete;
+ }
+ strncpy(buffer, strbuffer, nbytes);
+ ((char *)buffer)[nbytes - 1] = '\0';
+ return retVal;
+}
+
+/**
+ * Convert data between the target's byte order and the host's byte order;
+ * with matching endianness this amounts to a plain copy.
+ */
+ompd_rc_t
+_endianess(ompd_address_space_context_t *address_space_context, /* IN */
+ const void *input, /* IN */
+ ompd_size_t unit_size, /* IN */
+ ompd_size_t count, /* IN: number of primitive type */
+ void *output) {
+ if (acontext.id != address_space_context->id)
+ return ompd_rc_stale_handle;
+ memmove(output, input, count * unit_size);
+ return ompd_rc_ok;
+}
+
+/**
+ * Returns thread context for thread id; helper function for _thread_context
+ * callback.
+ */
+ompd_thread_context_t *get_thread_context(int id) {
+ static ompd_thread_context_t *tc = NULL;
+ static int size = 0;
+ int i;
+ if (id < 1)
+ return NULL;
+ if (tc == NULL) {
+ size = 16;
+ tc = malloc(size * sizeof(ompd_thread_context_t));
+ for (i = 0; i < size; i++)
+ tc[i].id = i + 1;
+ }
+ if (id - 1 >= size) {
+ size += 16;
+ tc = realloc(tc, size * sizeof(ompd_thread_context_t));
+ for (i = 0; i < size; i++)
+ tc[i].id = i + 1;
+ }
+ return tc + id - 1;
+}
+
+/**
+ * Get thread-specific context.
+ */ +ompd_rc_t +_thread_context(ompd_address_space_context_t *context, /* IN */ + ompd_thread_id_t kind, /* IN, 0 for pthread, 1 for lwp */ + ompd_size_t sizeof_thread_id, /* IN */ + const void *thread_id, /* IN */ + ompd_thread_context_t **thread_context) /* OUT */ +{ + if (acontext.id != context->id) + return ompd_rc_stale_handle; + if (kind != 0 && kind != 1) + return ompd_rc_unsupported; + long int tid; + if (sizeof(long int) >= 8 && sizeof_thread_id == 8) + tid = *(uint64_t *)thread_id; + else if (sizeof(long int) >= 4 && sizeof_thread_id == 4) + tid = *(uint32_t *)thread_id; + else if (sizeof(long int) >= 2 && sizeof_thread_id == 2) + tid = *(uint16_t *)thread_id; + else + return ompd_rc_bad_input; + PyObject *pFunc = PyObject_GetAttrString(pModule, "_thread_context"); + if (pFunc && PyCallable_Check(pFunc)) { + PyObject *pArgs = PyTuple_New(2); + PyTuple_SetItem(pArgs, 0, Py_BuildValue("l", kind)); + PyTuple_SetItem(pArgs, 1, Py_BuildValue("l", tid)); + PyObject *res = PyObject_CallObject(pFunc, pArgs); + int resAsInt = (int)PyLong_AsLong(res); + if (resAsInt == -1) { + // NOTE: could not find match for thread_id + return ompd_rc_unavailable; + } + (*thread_context) = get_thread_context(resAsInt); + Py_XDECREF(pArgs); + Py_XDECREF(res); + Py_XDECREF(pFunc); + if (*thread_context == NULL) { + return ompd_rc_bad_input; + } + return ompd_rc_ok; + } + Py_XDECREF(pFunc); + return ompd_rc_error; +} + +/** + * Calls ompd_process_initialize; returns pointer to ompd_address_space_handle. + */ +static PyObject *call_ompd_initialize(PyObject *self, PyObject *noargs) { + pModule = PyImport_Import(PyUnicode_FromString("ompd_callbacks")); + + static ompd_callbacks_t table = { + _alloc, _free, _print, _sizes, _sym_addr, _read, + NULL, _read_string, _endianess, _endianess, _thread_context}; + + ompd_rc_t (*my_ompd_init)(ompd_word_t version, ompd_callbacks_t *) = + dlsym(ompd_library, "ompd_initialize"); + ompd_rc_t returnInit = my_ompd_init(201811, &table); + if (returnInit != ompd_rc_ok) { + _printf("An error occurred when calling ompd_initialize! Error code: %d", + returnInit); + } + ompd_address_space_handle_t *addr_space = NULL; + ompd_rc_t (*my_proc_init)(ompd_address_space_context_t *, + ompd_address_space_handle_t **) = + dlsym(ompd_library, "ompd_process_initialize"); + ompd_rc_t retProcInit = my_proc_init(&acontext, &addr_space); + if (retProcInit != ompd_rc_ok) { + _printf("An error occurred when calling ompd_process_initialize! Error " + "code: %d", + retProcInit); + } + return PyCapsule_New(addr_space, "AddressSpace", NULL); +} + +/** + * Returns a PyCapsule pointer to thread handle for thread with the given id. + */ +static PyObject *get_thread_handle(PyObject *self, PyObject *args) { + PyObject *threadIdTup = PyTuple_GetItem(args, 0); + uint64_t threadId = (uint64_t)PyLong_AsLong(threadIdTup); + // NOTE: compiler does not know what thread handle looks like, so no memory + // is allocated automatically in the debugger's memory space + + PyObject *addrSpaceTup = PyTuple_GetItem(args, 1); + ompd_thread_handle_t *threadHandle; + ompd_address_space_handle_t *addrSpace = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceTup, + "AddressSpace"); + + ompd_size_t sizeof_tid = (ompd_size_t)sizeof(uint64_t); + ompd_rc_t retVal = ompd_get_thread_handle(addrSpace, 1, sizeof_tid, &threadId, + &threadHandle); + + if (retVal == ompd_rc_unavailable) { + return Py_BuildValue("i", -1); + } else if (retVal != ompd_rc_ok) { + _printf( + "An error occured when calling ompd_get_thread_handle! 
Error code: %d",
+ retVal);
+ return Py_BuildValue("l", retVal);
+ }
+ return PyCapsule_New(threadHandle, "ThreadHandle",
+ my_thread_capsule_destructor);
+}
+
+/**
+ * Returns a PyCapsule pointer to a thread handle for a specific thread id in
+ * the current parallel context.
+ */
+static PyObject *call_ompd_get_thread_in_parallel(PyObject *self,
+ PyObject *args) {
+ PyObject *parallelHandlePy = PyTuple_GetItem(args, 0);
+ int threadNum = (int)PyLong_AsLong(PyTuple_GetItem(args, 1));
+ ompd_parallel_handle_t *parallelHandle =
+ (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy,
+ "ParallelHandle"));
+ ompd_thread_handle_t *threadHandle;
+
+ ompd_rc_t retVal =
+ ompd_get_thread_in_parallel(parallelHandle, threadNum, &threadHandle);
+
+ if (retVal != ompd_rc_ok) {
+ _printf("An error occurred when calling ompd_get_thread_in_parallel! Error "
+ "code: %d",
+ retVal);
+ return Py_BuildValue("l", retVal);
+ }
+ return PyCapsule_New(threadHandle, "ThreadHandle",
+ my_thread_capsule_destructor);
+}
+
+/**
+ * Returns a PyCapsule pointer to the parallel handle of the current parallel
+ * region associated with a thread.
+ */
+static PyObject *call_ompd_get_curr_parallel_handle(PyObject *self,
+ PyObject *args) {
+ PyObject *threadHandlePy = PyTuple_GetItem(args, 0);
+ ompd_thread_handle_t *threadHandle =
+ (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy,
+ "ThreadHandle"));
+ ompd_parallel_handle_t *parallelHandle;
+
+ ompd_rc_t retVal =
+ ompd_get_curr_parallel_handle(threadHandle, &parallelHandle);
+
+ if (retVal != ompd_rc_ok) {
+ _printf("An error occurred when calling ompd_get_curr_parallel_handle! "
+ "Error code: %d",
+ retVal);
+ return Py_BuildValue("l", retVal);
+ }
+ return PyCapsule_New(parallelHandle, "ParallelHandle",
+ my_parallel_capsule_destructor);
+}
+
+/**
+ * Returns a PyCapsule pointer to the parallel handle for the parallel region
+ * enclosing the parallel region specified by parallel_handle.
+ */
+static PyObject *call_ompd_get_enclosing_parallel_handle(PyObject *self,
+ PyObject *args) {
+ PyObject *parallelHandlePy = PyTuple_GetItem(args, 0);
+ ompd_parallel_handle_t *parallelHandle =
+ (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy,
+ "ParallelHandle"));
+ ompd_parallel_handle_t *enclosingParallelHandle;
+
+ ompd_rc_t retVal = ompd_get_enclosing_parallel_handle(
+ parallelHandle, &enclosingParallelHandle);
+
+ if (retVal != ompd_rc_ok) {
+ _printf("An error occurred when calling "
+ "ompd_get_enclosing_parallel_handle! Error code: %d",
+ retVal);
+ return Py_BuildValue("l", retVal);
+ }
+ return PyCapsule_New(enclosingParallelHandle, "ParallelHandle",
+ my_parallel_capsule_destructor);
+}
+
+/**
+ * Returns a PyCapsule pointer to the parallel handle for the parallel region
+ * enclosing the task specified.
+ */
+static PyObject *call_ompd_get_task_parallel_handle(PyObject *self,
+ PyObject *args) {
+ PyObject *taskHandlePy = PyTuple_GetItem(args, 0);
+ ompd_task_handle_t *taskHandle =
+ PyCapsule_GetPointer(taskHandlePy, "TaskHandle");
+ ompd_parallel_handle_t *taskParallelHandle;
+
+ ompd_rc_t retVal =
+ ompd_get_task_parallel_handle(taskHandle, &taskParallelHandle);
+
+ if (retVal != ompd_rc_ok) {
+ _printf("An error occurred when calling ompd_get_task_parallel_handle! "
" + "Error code: %d"); + return Py_BuildValue("l", retVal); + } + return PyCapsule_New(taskParallelHandle, "ParallelHandle", + my_parallel_capsule_destructor); +} + +/** + * Releases a parallel handle; is called in by the destructor of a Python + * parallel_handle object. + */ +static PyObject *call_ompd_rel_parallel_handle(PyObject *self, PyObject *args) { + PyObject *parallelHandlePy = PyTuple_GetItem(args, 0); + ompd_parallel_handle_t *parallelHandle = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy, + "ParallelHandle")); + + ompd_rc_t retVal = ompd_rel_parallel_handle(parallelHandle); + if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_rel_parallel_handle! Error " + "code: %d", + retVal); + } + return Py_BuildValue("l", retVal); +} + +/** + * Returns a PyCapsule pointer to the task handle of the current task region + * associated with a thread. + */ +static PyObject *call_ompd_get_curr_task_handle(PyObject *self, + PyObject *args) { + PyObject *threadHandlePy = PyTuple_GetItem(args, 0); + ompd_thread_handle_t *threadHandle = + (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy, + "ThreadHandle")); + ompd_task_handle_t *taskHandle; + + ompd_rc_t retVal = ompd_get_curr_task_handle(threadHandle, &taskHandle); + + if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_get_curr_task_handle! Error " + "code: %d", + retVal); + return Py_BuildValue("l", retVal); + } + return PyCapsule_New(taskHandle, "TaskHandle", my_task_capsule_destructor); +} + +/** + * Returns a task handle for the task that created the task specified. + */ +static PyObject *call_ompd_get_generating_task_handle(PyObject *self, + PyObject *args) { + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *taskHandle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle")); + ompd_task_handle_t *generatingTaskHandle; + + ompd_rc_t retVal = + ompd_get_generating_task_handle(taskHandle, &generatingTaskHandle); + + if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_get_generating_task_handle! " + "Error code: %d", + retVal); + return Py_BuildValue("l", retVal); + } + return PyCapsule_New(generatingTaskHandle, "TaskHandle", + my_task_capsule_destructor); +} + +/** + * Returns the task handle for the task that scheduled the task specified. + */ +static PyObject *call_ompd_get_scheduling_task_handle(PyObject *self, + PyObject *args) { + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *taskHandle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle")); + ompd_task_handle_t *schedulingTaskHandle; + + ompd_rc_t retVal = + ompd_get_scheduling_task_handle(taskHandle, &schedulingTaskHandle); + + if (retVal == ompd_rc_unavailable) { + return Py_None; + } else if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_get_scheduling_task_handle! " + "Error code: %d", + retVal); + return Py_BuildValue("l", retVal); + } + return PyCapsule_New(schedulingTaskHandle, "TaskHandle", + my_task_capsule_destructor); +} + +/** + * Returns task handles for the implicit tasks associated with a parallel + * region. 
+ */ +static PyObject *call_ompd_get_task_in_parallel(PyObject *self, + PyObject *args) { + PyObject *parallelHandlePy = PyTuple_GetItem(args, 0); + int threadNum = (int)PyLong_AsLong(PyTuple_GetItem(args, 1)); + ompd_parallel_handle_t *parallelHandle = + (ompd_parallel_handle_t *)(PyCapsule_GetPointer(parallelHandlePy, + "ParallelHandle")); + ompd_task_handle_t *taskHandle; + + ompd_rc_t retVal = + ompd_get_task_in_parallel(parallelHandle, threadNum, &taskHandle); + + if (retVal != ompd_rc_ok) { + _printf("An error occurred when calling ompd_get_task_in_parallel! Error " + "code: %d", + retVal); + return Py_BuildValue("l", retVal); + } + return PyCapsule_New(taskHandle, "TaskHandle", my_task_capsule_destructor); +} + +/** + * Releases a task handle; is called by the destructor of a Python task_handle + * object. + */ +static PyObject *call_ompd_rel_task_handle(PyObject *self, PyObject *args) { + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *taskHandle = + (ompd_task_handle_t *)(PyCapsule_GetPointer(taskHandlePy, "TaskHandle")); + + ompd_rc_t retVal = ompd_rel_task_handle(taskHandle); + if (retVal != ompd_rc_ok) { + _printf( + "An error occurred when calling ompd_rel_task_handle! Error code: %d", + retVal); + } + return Py_BuildValue("l", retVal); +} + +/** + * Calls ompd_get_task_frame and returns a PyCapsule for the enter frame of the + * given task. + */ +static PyObject *call_ompd_get_task_frame(PyObject *self, PyObject *args) { + PyObject *taskHandlePy = PyTuple_GetItem(args, 0); + ompd_task_handle_t *taskHandle = + (ompd_task_handle_t *)PyCapsule_GetPointer(taskHandlePy, "TaskHandle"); + ompd_frame_info_t exitFrameInfo; + ompd_frame_info_t enterFrameInfo; + + ompd_rc_t retVal = + ompd_get_task_frame(taskHandle, &exitFrameInfo, &enterFrameInfo); + + if (retVal != ompd_rc_ok) { + _printf( + "An error occurred when calling ompd_get_task_frame! Error code: %d", + retVal); + return Py_BuildValue("l", retVal); + } + + PyObject *result = PyTuple_New(4); + PyTuple_SetItem( + result, 0, PyLong_FromUnsignedLong(enterFrameInfo.frame_address.address)); + PyTuple_SetItem(result, 1, + PyLong_FromUnsignedLong(enterFrameInfo.frame_flag)); + PyTuple_SetItem(result, 2, + PyLong_FromUnsignedLong(exitFrameInfo.frame_address.address)); + PyTuple_SetItem(result, 3, PyLong_FromUnsignedLong(exitFrameInfo.frame_flag)); + return result; +} + +/** + * Calls ompd_get_icv_from_scope. 
+ */ +static PyObject *call_ompd_get_icv_from_scope(PyObject *self, PyObject *args) { + PyObject *addrSpaceHandlePy = PyTuple_GetItem(args, 0); + PyObject *scopePy = PyTuple_GetItem(args, 1); + PyObject *icvIdPy = PyTuple_GetItem(args, 2); + + ompd_scope_t scope = (ompd_scope_t)PyLong_AsLong(scopePy); + ompd_address_space_handle_t *addrSpaceHandle; + switch (scope) { + case ompd_scope_thread: + addrSpaceHandle = (ompd_address_space_handle_t *)PyCapsule_GetPointer( + addrSpaceHandlePy, "ThreadHandle"); + break; + case ompd_scope_parallel: + addrSpaceHandle = (ompd_address_space_handle_t *)PyCapsule_GetPointer( + addrSpaceHandlePy, "ParallelHandle"); + break; + case ompd_scope_implicit_task: + addrSpaceHandle = (ompd_address_space_handle_t *)PyCapsule_GetPointer( + addrSpaceHandlePy, "TaskHandle"); + break; + case ompd_scope_task: + addrSpaceHandle = (ompd_address_space_handle_t *)PyCapsule_GetPointer( + addrSpaceHandlePy, "TaskHandle"); + break; + default: + addrSpaceHandle = (ompd_address_space_handle_t *)PyCapsule_GetPointer( + addrSpaceHandlePy, "AddressSpace"); + break; + } + + ompd_icv_id_t icvId = (ompd_icv_id_t)PyLong_AsLong(icvIdPy); + ompd_word_t icvValue; + + ompd_rc_t retVal = + ompd_get_icv_from_scope(addrSpaceHandle, scope, icvId, &icvValue); + + if (retVal != ompd_rc_ok) { + if (retVal != ompd_rc_incomplete) { + _printf("An error occurred when calling ompd_get_icv_from_scope(%i, %i): " + "Error code: %d", + scope, icvId, retVal); + } + return Py_None; + } + return PyLong_FromLong(icvValue); +} + +/** + * Calls ompd_enumerate_icvs. + */ +static PyObject *call_ompd_enumerate_icvs(PyObject *self, PyObject *args) { + PyObject *addrSpaceHandlePy = PyTuple_GetItem(args, 0); + PyObject *currentPy = PyTuple_GetItem(args, 1); + ompd_icv_id_t current = (ompd_icv_id_t)(PyLong_AsLong(currentPy)); + ompd_address_space_handle_t *addrSpaceHandle = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceHandlePy, + "AddressSpace"); + + const char *nextIcv; + ompd_scope_t nextScope; + int more; + ompd_icv_id_t nextId; + + ompd_rc_t retVal = ompd_enumerate_icvs(addrSpaceHandle, current, &nextId, + &nextIcv, &nextScope, &more); + + if (retVal != ompd_rc_ok) { + _printf( + "An error occurred when calling ompd_enumerate_icvs! Error code: %d", + retVal); + return Py_None; + } + PyObject *retTuple = PyTuple_New(4); + PyTuple_SetItem(retTuple, 0, PyLong_FromUnsignedLong(nextId)); + PyTuple_SetItem(retTuple, 1, PyUnicode_FromString(nextIcv)); + PyTuple_SetItem(retTuple, 2, PyLong_FromUnsignedLong(nextScope)); + PyTuple_SetItem(retTuple, 3, PyLong_FromLong(more)); + return retTuple; +} + +/** + * Calls ompd_enumerate_states. + */ +static PyObject *call_ompd_enumerate_states(PyObject *self, PyObject *args) { + PyObject *addrSpaceHandlePy = PyTuple_GetItem(args, 0); + ompd_address_space_handle_t *addrSpaceHandle = + (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceHandlePy, + "AddressSpace"); + ompd_word_t currentState = + (ompd_word_t)PyLong_AsLong(PyTuple_GetItem(args, 1)); + + ompd_word_t nextState; + const char *nextStateName; + ompd_word_t moreEnums; + + ompd_rc_t retVal = ompd_enumerate_states( + addrSpaceHandle, currentState, &nextState, &nextStateName, &moreEnums); + + if (retVal != ompd_rc_ok) { + _printf( + "An error occurred when calling ompd_enumerate_states! 
+
+/**
+ * Calls ompd_enumerate_states.
+ */
+static PyObject *call_ompd_enumerate_states(PyObject *self, PyObject *args) {
+  PyObject *addrSpaceHandlePy = PyTuple_GetItem(args, 0);
+  ompd_address_space_handle_t *addrSpaceHandle =
+      (ompd_address_space_handle_t *)PyCapsule_GetPointer(addrSpaceHandlePy,
+                                                          "AddressSpace");
+  ompd_word_t currentState =
+      (ompd_word_t)PyLong_AsLong(PyTuple_GetItem(args, 1));
+
+  ompd_word_t nextState;
+  const char *nextStateName;
+  ompd_word_t moreEnums;
+
+  ompd_rc_t retVal = ompd_enumerate_states(
+      addrSpaceHandle, currentState, &nextState, &nextStateName, &moreEnums);
+
+  if (retVal != ompd_rc_ok) {
+    _printf(
+        "An error occurred when calling ompd_enumerate_states! Error code: %d",
+        retVal);
+    Py_RETURN_NONE;
+  }
+  PyObject *retTuple = PyTuple_New(3);
+  PyTuple_SetItem(retTuple, 0, PyLong_FromLong(nextState));
+  PyTuple_SetItem(retTuple, 1, PyUnicode_FromString(nextStateName));
+  PyTuple_SetItem(retTuple, 2, PyLong_FromLong(moreEnums));
+  return retTuple;
+}
+
+/**
+ * Calls ompd_get_state.
+ */
+static PyObject *call_ompd_get_state(PyObject *self, PyObject *args) {
+  PyObject *threadHandlePy = PyTuple_GetItem(args, 0);
+  ompd_thread_handle_t *threadHandle =
+      (ompd_thread_handle_t *)PyCapsule_GetPointer(threadHandlePy,
+                                                   "ThreadHandle");
+  ompd_word_t state;
+  ompd_wait_id_t waitId;
+
+  ompd_rc_t retVal = ompd_get_state(threadHandle, &state, &waitId);
+
+  if (retVal != ompd_rc_ok) {
+    _printf("An error occurred when calling ompd_get_state! Error code: %d",
+            retVal);
+    Py_RETURN_NONE;
+  }
+  PyObject *retTuple = PyTuple_New(2);
+  PyTuple_SetItem(retTuple, 0, PyLong_FromLong(state));
+  PyTuple_SetItem(retTuple, 1, PyLong_FromUnsignedLong(waitId));
+  return retTuple;
+}
+
+/**
+ * Calls ompd_get_task_function and returns the entry point of the code that
+ * the task executes.
+ */
+static PyObject *call_ompd_get_task_function(PyObject *self, PyObject *args) {
+  PyObject *taskHandlePy = PyTuple_GetItem(args, 0);
+  ompd_task_handle_t *taskHandle =
+      (ompd_task_handle_t *)PyCapsule_GetPointer(taskHandlePy, "TaskHandle");
+  ompd_address_t entryPoint;
+
+  ompd_rc_t retVal = ompd_get_task_function(taskHandle, &entryPoint);
+
+  if (retVal != ompd_rc_ok) {
+    _printf(
+        "An error occurred when calling ompd_get_task_function! Error code: %d",
+        retVal);
+    Py_RETURN_NONE;
+  }
+  return PyLong_FromLong((long)entryPoint.address);
+}
+
+/**
+ * Prints the pointer stored inside a PyCapsule.
+ */
+static PyObject *print_capsule(PyObject *self, PyObject *args) {
+  PyObject *capsule = PyTuple_GetItem(args, 0);
+  PyObject *name = PyTuple_GetItem(args, 1);
+  void *pointer =
+      PyCapsule_GetPointer(capsule, PyUnicode_AsUTF8AndSize(name, NULL));
+  _printf("Capsule pointer: %p", pointer);
+  Py_RETURN_NONE;
+}
+
+/**
+ * Calls ompd_get_thread_id for the given handle and returns the thread id as
+ * a long.
+ */
+static PyObject *call_ompd_get_thread_id(PyObject *self, PyObject *args) {
+  PyObject *threadHandlePy = PyTuple_GetItem(args, 0);
+  ompd_thread_handle_t *threadHandle =
+      (ompd_thread_handle_t *)(PyCapsule_GetPointer(threadHandlePy,
+                                                    "ThreadHandle"));
+  ompd_thread_id_t kind = 0; // OMPD_THREAD_ID_PTHREAD
+  ompd_size_t sizeOfId = (ompd_size_t)sizeof(pthread_t);
+
+  uint64_t thread;
+  ompd_rc_t retVal = ompd_get_thread_id(threadHandle, kind, sizeOfId, &thread);
+
+  if (retVal != ompd_rc_ok) {
+    kind = 1; // OMPD_THREAD_ID_LWP
+    retVal = ompd_get_thread_id(threadHandle, kind, sizeOfId, &thread);
+    if (retVal != ompd_rc_ok) {
+      _printf(
+          "An error occurred when calling ompd_get_thread_id! Error code: %d",
+          retVal);
+      Py_RETURN_NONE;
+    }
+  }
+  return PyLong_FromLong(thread);
+}
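The state enumeration pairs naturally with call_ompd_get_state: enumerate until the numeric state matches, then report its name. A minimal sketch, assuming `addr_handle` ("AddressSpace" capsule) and `thread_handle` ("ThreadHandle" capsule) were obtained earlier; starting the enumeration at 0 is an assumption:

    import ompdModule

    def state_name(addr_handle, thread_handle):
        ret = ompdModule.call_ompd_get_state(thread_handle)
        if ret is None:
            return None
        state, wait_id = ret
        current, more = 0, 1
        while more:
            enum = ompdModule.call_ompd_enumerate_states(addr_handle, current)
            if enum is None:
                return None
            current, name, more = enum
            if current == state:
                return name
        return None
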
+
+/**
+ * Calls ompd_get_tool_data and returns a tuple containing the value and
+ * pointer of the ompt_data_t union for the selected scope.
+ */
+static PyObject *call_ompd_get_tool_data(PyObject *self, PyObject *args) {
+  PyObject *scopePy = PyTuple_GetItem(args, 0);
+  ompd_scope_t scope = (ompd_scope_t)(PyLong_AsLong(scopePy));
+  PyObject *handlePy = PyTuple_GetItem(args, 1);
+  void *handle = NULL;
+
+  if (scope == ompd_scope_thread) {
+    handle = (ompd_thread_handle_t *)(PyCapsule_GetPointer(handlePy,
+                                                           "ThreadHandle"));
+  } else if (scope == ompd_scope_parallel) {
+    handle = (ompd_parallel_handle_t *)(PyCapsule_GetPointer(
+        handlePy, "ParallelHandle"));
+  } else if (scope == ompd_scope_implicit_task || scope == ompd_scope_task) {
+    handle = (ompd_task_handle_t *)(PyCapsule_GetPointer(handlePy,
+                                                         "TaskHandle"));
+  } else {
+    _printf("An error occurred when calling ompd_get_tool_data! Scope type "
+            "not supported.");
+    Py_RETURN_NONE;
+  }
+
+  ompd_word_t value;
+  ompd_address_t ptr;
+
+  ompd_rc_t retVal = ompd_get_tool_data(handle, scope, &value, &ptr);
+
+  if (retVal != ompd_rc_ok) {
+    _printf("An error occurred when calling ompd_get_tool_data! Error code: %d",
+            retVal);
+    Py_RETURN_NONE;
+  }
+
+  PyObject *retTuple = PyTuple_New(2);
+  PyTuple_SetItem(retTuple, 0, PyLong_FromLong(value));
+  PyTuple_SetItem(retTuple, 1, PyLong_FromLong(ptr.address));
+  return retTuple;
+}
+
+/**
+ * Calls ompd_get_icv_string_from_scope.
+ */
+static PyObject *call_ompd_get_icv_string_from_scope(PyObject *self,
+                                                     PyObject *args) {
+  PyObject *handlePy = PyTuple_GetItem(args, 0);
+  PyObject *scopePy = PyTuple_GetItem(args, 1);
+  PyObject *icvIdPy = PyTuple_GetItem(args, 2);
+
+  ompd_scope_t scope = (ompd_scope_t)PyLong_AsLong(scopePy);
+  void *handle = NULL;
+  switch (scope) {
+  case ompd_scope_thread:
+    handle =
+        (ompd_thread_handle_t *)PyCapsule_GetPointer(handlePy, "ThreadHandle");
+    break;
+  case ompd_scope_parallel:
+    handle = (ompd_parallel_handle_t *)PyCapsule_GetPointer(handlePy,
+                                                            "ParallelHandle");
+    break;
+  case ompd_scope_implicit_task:
+  case ompd_scope_task:
+    handle =
+        (ompd_task_handle_t *)PyCapsule_GetPointer(handlePy, "TaskHandle");
+    break;
+  default:
+    handle = (ompd_address_space_handle_t *)PyCapsule_GetPointer(
+        handlePy, "AddressSpace");
+    break;
+  }
+
+  ompd_icv_id_t icvId = (ompd_icv_id_t)PyLong_AsLong(icvIdPy);
+  const char *icvString;
+
+  ompd_rc_t retVal =
+      ompd_get_icv_string_from_scope(handle, scope, icvId, &icvString);
+
+  if (retVal != ompd_rc_ok) {
+    _printf("An error occurred when calling ompd_get_icv_string_from_scope! "
+            "Error code: %d",
+            retVal);
+    Py_RETURN_NONE;
+  }
+  return PyUnicode_FromString(icvString);
+}
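Because the numeric ICV query stays silent on ompd_rc_incomplete and returns None, a caller can fall back to the string form. A hypothetical helper sketching that pattern; `handle`, `scope`, and `icv_id` are assumed to come from an enumeration loop like the one shown earlier:

    import ompdModule

    def icv_value(handle, scope, icv_id):
        # Prefer the numeric value; fall back to the string representation
        # when the numeric query yields None (e.g. ompd_rc_incomplete).
        value = ompdModule.call_ompd_get_icv_from_scope(handle, scope, icv_id)
        if value is None:
            value = ompdModule.call_ompd_get_icv_string_from_scope(
                handle, scope, icv_id)
        return value
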
+
+// Prototypes of API test functions.
+PyObject *test_ompd_get_thread_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_curr_parallel_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_thread_in_parallel(PyObject *self, PyObject *args);
+PyObject *test_ompd_thread_handle_compare(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_thread_id(PyObject *self, PyObject *args);
+PyObject *test_ompd_rel_thread_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_enclosing_parallel_handle(PyObject *self,
+                                                  PyObject *args);
+PyObject *test_ompd_parallel_handle_compare(PyObject *self, PyObject *args);
+PyObject *test_ompd_rel_parallel_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_initialize(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_get_api_version(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_get_version_string(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_finalize(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_process_initialize(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_device_initialize(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_rel_address_space_handle(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_get_omp_version(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_omp_version_string(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_curr_task_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_task_parallel_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_generating_task_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_scheduling_task_handle(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_task_in_parallel(PyObject *self, PyObject *args);
+PyObject *test_ompd_rel_task_handle(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_task_handle_compare(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_task_function(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_task_frame(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_state(PyObject *self, PyObject *args);
+PyObject *test_ompd_get_display_control_vars(PyObject *self, PyObject *args);
+PyObject *test_ompd_rel_display_control_vars(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_enumerate_icvs(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_get_icv_from_scope_with_addr_handle(PyObject *self,
+                                                        PyObject *noargs);
+PyObject *test_ompd_get_icv_from_scope_with_thread_handle(PyObject *self,
+                                                          PyObject *noargs);
+PyObject *test_ompd_get_icv_from_scope_with_parallel_handle(PyObject *self,
+                                                            PyObject *noargs);
+PyObject *test_ompd_get_icv_from_scope_with_task_handle(PyObject *self,
+                                                        PyObject *noargs);
+PyObject *test_ompd_get_icv_string_from_scope(PyObject *self,
+                                              PyObject *noargs);
+PyObject *test_ompd_get_tool_data(PyObject *self, PyObject *noargs);
+PyObject *test_ompd_enumerate_states(PyObject *self, PyObject *noargs);
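These self-test entry points are exported through the method table below, so they can be driven from gdb's Python prompt. A sketch of exercising the version queries; it assumes 'ompd init' has already loaded and initialized the OMPD library, and that each test_* function reports its own result:

    import ompdModule

    # Each test_* function drives one OMPD API call and prints the outcome.
    ompdModule.test_ompd_get_api_version()
    ompdModule.test_ompd_get_version_string()
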
+
+/**
+ * Binds Python function names to C functions.
+ */
+static PyMethodDef ompdModule_methods[] = {
+    {"ompd_open", ompd_open, METH_VARARGS,
+     "Execute dlopen, return OMPD version."},
+    {"call_ompd_initialize", call_ompd_initialize, METH_NOARGS,
+     "Initializes OMPD environment and callbacks."},
+    {"call_ompd_rel_thread_handle", call_ompd_rel_thread_handle, METH_VARARGS,
+     "Releases a thread handle."},
+    {"get_thread_handle", get_thread_handle, METH_VARARGS,
+     "Collects information on threads."},
+    {"call_ompd_get_thread_in_parallel", call_ompd_get_thread_in_parallel,
+     METH_VARARGS,
+     "Obtains the handle for a given thread within a parallel region."},
+    {"call_ompd_get_curr_parallel_handle", call_ompd_get_curr_parallel_handle,
+     METH_VARARGS,
+     "Obtains a pointer to the parallel handle for the current parallel "
+     "region."},
+    {"call_ompd_get_enclosing_parallel_handle",
+     call_ompd_get_enclosing_parallel_handle, METH_VARARGS,
+     "Obtains a pointer to the parallel handle for the parallel region "
+     "enclosing the parallel region specified."},
+    {"call_ompd_get_task_parallel_handle", call_ompd_get_task_parallel_handle,
+     METH_VARARGS,
+     "Obtains a pointer to the parallel handle for the parallel region "
+     "enclosing the task region specified."},
+    {"call_ompd_rel_parallel_handle", call_ompd_rel_parallel_handle,
+     METH_VARARGS, "Releases a parallel region handle."},
+    {"call_ompd_get_curr_task_handle", call_ompd_get_curr_task_handle,
+     METH_VARARGS,
+     "Obtains a pointer to the task handle for the current task region "
+     "associated with an OpenMP thread."},
+    {"call_ompd_get_generating_task_handle",
+     call_ompd_get_generating_task_handle, METH_VARARGS,
+     "Obtains a pointer to the task handle for the task that was created when "
+     "the task handle specified was encountered."},
+    {"call_ompd_get_scheduling_task_handle",
+     call_ompd_get_scheduling_task_handle, METH_VARARGS,
+     "Obtains a pointer to the task handle for the task that scheduled the "
+     "task specified."},
+    {"call_ompd_get_task_in_parallel", call_ompd_get_task_in_parallel,
+     METH_VARARGS,
+     "Obtains the handle of an implicit task associated with a parallel "
+     "region."},
+    {"call_ompd_rel_task_handle", call_ompd_rel_task_handle, METH_VARARGS,
+     "Releases a task handle."},
+    {"call_ompd_get_task_frame", call_ompd_get_task_frame, METH_VARARGS,
+     "Returns the enter and exit frame addresses and flags of the given "
+     "task."},
+    {"call_ompd_enumerate_icvs", call_ompd_enumerate_icvs, METH_VARARGS,
+     "Enumerates OpenMP ICVs."},
+    {"call_ompd_get_icv_from_scope", call_ompd_get_icv_from_scope,
+     METH_VARARGS, "Gets ICVs from scope."},
+    {"call_ompd_enumerate_states", call_ompd_enumerate_states, METH_VARARGS,
+     "Enumerates OMP states."},
+    {"call_ompd_get_state", call_ompd_get_state, METH_VARARGS,
+     "Returns the state for the given thread handle."},
+    {"call_ompd_get_task_function", call_ompd_get_task_function, METH_VARARGS,
+     "Returns the entry point of the code executed by the task."},
+    {"print_capsule", print_capsule, METH_VARARGS, "Prints capsule content."},
+    {"call_ompd_get_thread_id", call_ompd_get_thread_id, METH_VARARGS,
+     "Maps an OMPD thread handle to a native thread id."},
+    {"call_ompd_get_tool_data", call_ompd_get_tool_data, METH_VARARGS,
+     "Returns value and pointer of ompt_data_t for the given scope and "
+     "handle."},
+    {"call_ompd_get_icv_string_from_scope",
+     call_ompd_get_icv_string_from_scope, METH_VARARGS,
+     "Gets the ICV string representation from scope."},
+
+    {"test_ompd_get_thread_handle", test_ompd_get_thread_handle, METH_VARARGS,
+     "Test API ompd_get_thread_handle."},
+    {"test_ompd_get_curr_parallel_handle", test_ompd_get_curr_parallel_handle,
+     METH_VARARGS, "Test API ompd_get_curr_parallel_handle."},
+    {"test_ompd_get_thread_in_parallel", test_ompd_get_thread_in_parallel,
+     METH_VARARGS, "Test API ompd_get_thread_in_parallel."},
+    {"test_ompd_thread_handle_compare", test_ompd_thread_handle_compare,
+     METH_VARARGS, "Test API ompd_thread_handle_compare."},
+    {"test_ompd_get_thread_id", test_ompd_get_thread_id, METH_VARARGS,
+     "Test API ompd_get_thread_id."},
+    {"test_ompd_rel_thread_handle", test_ompd_rel_thread_handle, METH_VARARGS,
+     "Test API ompd_rel_thread_handle."},
+    {"test_ompd_get_enclosing_parallel_handle",
+     test_ompd_get_enclosing_parallel_handle, METH_VARARGS,
+     "Test API ompd_get_enclosing_parallel_handle."},
+    {"test_ompd_parallel_handle_compare", test_ompd_parallel_handle_compare,
+     METH_VARARGS, "Test API ompd_parallel_handle_compare."},
+    {"test_ompd_rel_parallel_handle", test_ompd_rel_parallel_handle,
+     METH_VARARGS, "Test API ompd_rel_parallel_handle."},
+
+    {"test_ompd_initialize", test_ompd_initialize, METH_VARARGS,
+     "Test API ompd_initialize."},
+    {"test_ompd_get_api_version", test_ompd_get_api_version, METH_VARARGS,
+     "Test API ompd_get_api_version."},
+    {"test_ompd_get_version_string", test_ompd_get_version_string,
+     METH_VARARGS, "Test API ompd_get_version_string."},
+    {"test_ompd_finalize", test_ompd_finalize, METH_VARARGS,
+     "Test API ompd_finalize."},
+    {"test_ompd_process_initialize", test_ompd_process_initialize,
+     METH_VARARGS, "Test API ompd_process_initialize."},
+    {"test_ompd_device_initialize", test_ompd_device_initialize, METH_VARARGS,
+     "Test API ompd_device_initialize."},
+    {"test_ompd_rel_address_space_handle", test_ompd_rel_address_space_handle,
+     METH_VARARGS, "Test API ompd_rel_address_space_handle."},
+    {"test_ompd_get_omp_version", test_ompd_get_omp_version, METH_VARARGS,
+     "Test API ompd_get_omp_version."},
+    {"test_ompd_get_omp_version_string", test_ompd_get_omp_version_string,
+     METH_VARARGS, "Test API ompd_get_omp_version_string."},
+
+    {"test_ompd_get_curr_task_handle", test_ompd_get_curr_task_handle,
+     METH_VARARGS, "Test API ompd_get_curr_task_handle."},
+    {"test_ompd_get_task_parallel_handle", test_ompd_get_task_parallel_handle,
+     METH_VARARGS, "Test API ompd_get_task_parallel_handle."},
+    {"test_ompd_get_generating_task_handle",
+     test_ompd_get_generating_task_handle, METH_VARARGS,
+     "Test API ompd_get_generating_task_handle."},
+    {"test_ompd_get_scheduling_task_handle",
+     test_ompd_get_scheduling_task_handle, METH_VARARGS,
+     "Test API ompd_get_scheduling_task_handle."},
+    {"test_ompd_get_task_in_parallel", test_ompd_get_task_in_parallel,
+     METH_VARARGS, "Test API ompd_get_task_in_parallel."},
+    {"test_ompd_rel_task_handle", test_ompd_rel_task_handle, METH_VARARGS,
+     "Test API ompd_rel_task_handle."},
+    {"test_ompd_task_handle_compare", test_ompd_task_handle_compare,
+     METH_VARARGS, "Test API ompd_task_handle_compare."},
+    {"test_ompd_get_task_function", test_ompd_get_task_function, METH_VARARGS,
+     "Test API ompd_get_task_function."},
+    {"test_ompd_get_task_frame", test_ompd_get_task_frame, METH_VARARGS,
+     "Test API ompd_get_task_frame."},
+    {"test_ompd_get_state", test_ompd_get_state, METH_VARARGS,
+     "Test API ompd_get_state."},
+    {"test_ompd_get_display_control_vars", test_ompd_get_display_control_vars,
+     METH_VARARGS, "Test API ompd_get_display_control_vars."},
+    {"test_ompd_rel_display_control_vars", test_ompd_rel_display_control_vars,
+     METH_VARARGS, "Test API ompd_rel_display_control_vars."},
+    {"test_ompd_enumerate_icvs", test_ompd_enumerate_icvs, METH_VARARGS,
+     "Test API ompd_enumerate_icvs."},
+    {"test_ompd_get_icv_from_scope_with_addr_handle",
+     test_ompd_get_icv_from_scope_with_addr_handle, METH_VARARGS,
+     "Test API ompd_get_icv_from_scope with addr_handle."},
+    {"test_ompd_get_icv_from_scope_with_thread_handle",
+     test_ompd_get_icv_from_scope_with_thread_handle, METH_VARARGS,
+     "Test API ompd_get_icv_from_scope with thread_handle."},
+    {"test_ompd_get_icv_from_scope_with_parallel_handle",
+     test_ompd_get_icv_from_scope_with_parallel_handle, METH_VARARGS,
+     "Test API ompd_get_icv_from_scope with parallel_handle."},
+    {"test_ompd_get_icv_from_scope_with_task_handle",
+     test_ompd_get_icv_from_scope_with_task_handle, METH_VARARGS,
+     "Test API ompd_get_icv_from_scope with task_handle."},
+    {"test_ompd_get_icv_string_from_scope",
+     test_ompd_get_icv_string_from_scope, METH_VARARGS,
+     "Test API ompd_get_icv_string_from_scope."},
+    {"test_ompd_get_tool_data", test_ompd_get_tool_data, METH_VARARGS,
+     "Test API ompd_get_tool_data."},
+    {"test_ompd_enumerate_states", test_ompd_enumerate_states, METH_VARARGS,
+     "Test API ompd_enumerate_states."},
+    {NULL, NULL, 0, NULL}};
+
+/**
+ * Lets Python initialize the module.
+ */
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "ompdModule",                            /* m_name */
+    "Python bindings for the OMPD library.", /* m_doc */
+    -1,                                      /* m_size */
+    ompdModule_methods,                      /* m_methods */
+    NULL,                                    /* m_reload */
+    NULL,                                    /* m_traverse */
+    NULL,                                    /* m_clear */
+    NULL,                                    /* m_free */
+};
+#endif
+
+PyMODINIT_FUNC PyInit_ompdModule(void) { return PyModule_Create(&moduledef); }
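As a quick smoke test that the extension built and exposes its bindings, one can import it directly; the module path below reflects the python-module layout set up by the CMake rules earlier in this patch and is an assumption about a typical build tree:

    import sys
    # Assumed build-tree location of the compiled ompdModule.so.
    sys.path.append('/path/to/build/libompd/gdb-plugin/python-module/ompd')

    import ompdModule
    # The module should expose the wrappers registered in ompdModule_methods.
    for name in ('ompd_open', 'call_ompd_initialize',
                 'call_ompd_get_icv_from_scope'):
        assert hasattr(ompdModule, name)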