Index: tools/scan-build-py/bin/analyze-build =================================================================== --- tools/scan-build-py/bin/analyze-build +++ tools/scan-build-py/bin/analyze-build @@ -13,5 +13,5 @@ this_dir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.dirname(this_dir)) -from libscanbuild.analyze import analyze_build_main -sys.exit(analyze_build_main(this_dir, False)) +from libscanbuild.analyze import analyze_build +sys.exit(analyze_build()) Index: tools/scan-build-py/bin/analyze-build.bat =================================================================== --- tools/scan-build-py/bin/analyze-build.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0analyze-build %* Index: tools/scan-build-py/bin/analyze-c++ =================================================================== --- tools/scan-build-py/bin/analyze-c++ +++ tools/scan-build-py/bin/analyze-c++ @@ -11,4 +11,4 @@ sys.path.append(os.path.dirname(this_dir)) from libscanbuild.analyze import analyze_build_wrapper -sys.exit(analyze_build_wrapper(True)) +sys.exit(analyze_build_wrapper()) Index: tools/scan-build-py/bin/analyze-c++.bat =================================================================== --- tools/scan-build-py/bin/analyze-c++.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0analyze-c++ %* Index: tools/scan-build-py/bin/analyze-cc =================================================================== --- tools/scan-build-py/bin/analyze-cc +++ tools/scan-build-py/bin/analyze-cc @@ -11,4 +11,4 @@ sys.path.append(os.path.dirname(this_dir)) from libscanbuild.analyze import analyze_build_wrapper -sys.exit(analyze_build_wrapper(False)) +sys.exit(analyze_build_wrapper()) Index: tools/scan-build-py/bin/analyze-cc.bat =================================================================== --- tools/scan-build-py/bin/analyze-cc.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0analyze-cc %* Index: tools/scan-build-py/bin/intercept-build 
=================================================================== --- tools/scan-build-py/bin/intercept-build +++ tools/scan-build-py/bin/intercept-build @@ -14,4 +14,4 @@ sys.path.append(os.path.dirname(this_dir)) from libscanbuild.intercept import intercept_build_main -sys.exit(intercept_build_main(this_dir)) +sys.exit(intercept_build_main()) Index: tools/scan-build-py/bin/intercept-build.bat =================================================================== --- tools/scan-build-py/bin/intercept-build.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0intercept-build %* Index: tools/scan-build-py/bin/intercept-c++ =================================================================== --- tools/scan-build-py/bin/intercept-c++ +++ tools/scan-build-py/bin/intercept-c++ @@ -11,4 +11,4 @@ sys.path.append(os.path.dirname(this_dir)) from libscanbuild.intercept import intercept_build_wrapper -sys.exit(intercept_build_wrapper(True)) +sys.exit(intercept_build_wrapper()) Index: tools/scan-build-py/bin/intercept-c++.bat =================================================================== --- tools/scan-build-py/bin/intercept-c++.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0intercept-c++ %* Index: tools/scan-build-py/bin/intercept-cc =================================================================== --- tools/scan-build-py/bin/intercept-cc +++ tools/scan-build-py/bin/intercept-cc @@ -11,4 +11,4 @@ sys.path.append(os.path.dirname(this_dir)) from libscanbuild.intercept import intercept_build_wrapper -sys.exit(intercept_build_wrapper(False)) +sys.exit(intercept_build_wrapper()) Index: tools/scan-build-py/bin/intercept-cc.bat =================================================================== --- tools/scan-build-py/bin/intercept-cc.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0intercept-cc %* Index: tools/scan-build-py/bin/scan-build =================================================================== --- tools/scan-build-py/bin/scan-build +++ tools/scan-build-py/bin/scan-build @@ -13,5 
+13,5 @@ this_dir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.dirname(this_dir)) -from libscanbuild.analyze import analyze_build_main -sys.exit(analyze_build_main(this_dir, True)) +from libscanbuild.analyze import scan_build +sys.exit(scan_build()) Index: tools/scan-build-py/bin/scan-build.bat =================================================================== --- tools/scan-build-py/bin/scan-build.bat +++ /dev/null @@ -1 +0,0 @@ -python %~dp0scan-build %* Index: tools/scan-build-py/libear/__init__.py =================================================================== --- tools/scan-build-py/libear/__init__.py +++ tools/scan-build-py/libear/__init__.py @@ -68,7 +68,7 @@ @contextlib.contextmanager -def TemporaryDirectory(**kwargs): +def temporary_directory(**kwargs): name = tempfile.mkdtemp(**kwargs) try: yield name @@ -167,7 +167,7 @@ def _try_to_compile_and_link(self, source): try: - with TemporaryDirectory() as work_dir: + with temporary_directory() as work_dir: src_file = 'check.c' with open(os.path.join(work_dir, src_file), 'w') as handle: handle.write(source) @@ -207,9 +207,9 @@ if m: key = m.group(1) if key not in definitions or not definitions[key]: - return '/* #undef {} */\n'.format(key) + return '/* #undef {0} */{1}'.format(key, os.linesep) else: - return '#define {}\n'.format(key) + return '#define {0}{1}'.format(key, os.linesep) return line with open(template, 'r') as src_handle: Index: tools/scan-build-py/libear/ear.c =================================================================== --- tools/scan-build-py/libear/ear.c +++ tools/scan-build-py/libear/ear.c @@ -36,6 +36,7 @@ #if defined HAVE_NSGETENVIRON # include <crt_externs.h> +static char **environ; #else extern char **environ; #endif @@ -65,12 +66,11 @@ typedef char const * bear_env_t[ENV_SIZE]; static int bear_capture_env_t(bear_env_t *env); -static int bear_reset_env_t(bear_env_t *env); static void bear_release_env_t(bear_env_t *env); static char const **bear_update_environment(char
*const envp[], bear_env_t *env); static char const **bear_update_environ(char const **in, char const *key, char const *value); -static char **bear_get_environment(); -static void bear_report_call(char const *fun, char const *const argv[]); +static void bear_report_call(char const *const argv[]); +static void bear_write_json_string(char const *word, FILE *stream); static char const **bear_strings_build(char const *arg, va_list *ap); static char const **bear_strings_copy(char const **const in); static char const **bear_strings_append(char const **in, char const *e); @@ -141,6 +141,9 @@ static void on_load(void) { pthread_mutex_lock(&mutex); +#ifdef HAVE_NSGETENVIRON + environ = *_NSGetEnviron(); +#endif if (!initialized) initialized = bear_capture_env_t(&initial_env); pthread_mutex_unlock(&mutex); @@ -159,7 +162,7 @@ #ifdef HAVE_EXECVE int execve(const char *path, char *const argv[], char *const envp[]) { - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return call_execve(path, argv, envp); } #endif @@ -169,36 +172,35 @@ #error can not implement execv without execve #endif int execv(const char *path, char *const argv[]) { - bear_report_call(__func__, (char const *const *)argv); - char * const * envp = bear_get_environment(); - return call_execve(path, argv, envp); + bear_report_call((char const *const *)argv); + return call_execve(path, argv, environ); } #endif #ifdef HAVE_EXECVPE int execvpe(const char *file, char *const argv[], char *const envp[]) { - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return call_execvpe(file, argv, envp); } #endif #ifdef HAVE_EXECVP int execvp(const char *file, char *const argv[]) { - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return call_execvp(file, argv); } #endif #ifdef HAVE_EXECVP2 int execvP(const char *file, const char *search_path, char *const argv[]) { - 
bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return call_execvP(file, search_path, argv); } #endif #ifdef HAVE_EXECT int exect(const char *path, char *const argv[], char *const envp[]) { - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return call_exect(path, argv, envp); } #endif @@ -213,9 +215,8 @@ char const **argv = bear_strings_build(arg, &args); va_end(args); - bear_report_call(__func__, (char const *const *)argv); - char * const * envp = bear_get_environment(); - int const result = call_execve(path, (char *const *)argv, envp); + bear_report_call((char const *const *)argv); + int const result = call_execve(path, (char *const *)argv, environ); bear_strings_release(argv); return result; @@ -232,7 +233,7 @@ char const **argv = bear_strings_build(arg, &args); va_end(args); - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); int const result = call_execvp(file, (char *const *)argv); bear_strings_release(argv); @@ -252,7 +253,7 @@ char const **envp = va_arg(args, char const **); va_end(args); - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); int const result = call_execve(path, (char *const *)argv, (char *const *)envp); @@ -266,7 +267,7 @@ const posix_spawn_file_actions_t *file_actions, const posix_spawnattr_t *restrict attrp, char *const argv[restrict], char *const envp[restrict]) { - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return call_posix_spawn(pid, path, file_actions, attrp, argv, envp); } #endif @@ -276,7 +277,7 @@ const posix_spawn_file_actions_t *file_actions, const posix_spawnattr_t *restrict attrp, char *const argv[restrict], char *const envp[restrict]) { - bear_report_call(__func__, (char const *const *)argv); + bear_report_call((char const *const *)argv); return 
call_posix_spawnp(pid, file, file_actions, attrp, argv, envp); } #endif @@ -318,12 +319,12 @@ DLSYM(func, fp, "execvp"); - bear_env_t current_env; - bear_capture_env_t(&current_env); - bear_reset_env_t(&initial_env); + char **const original = environ; + char const **const modified = bear_update_environment(original, &initial_env); + environ = (char **)modified; int const result = (*fp)(file, argv); - bear_reset_env_t(&current_env); - bear_release_env_t(&current_env); + environ = original; + bear_strings_release(modified); return result; } @@ -336,12 +337,12 @@ DLSYM(func, fp, "execvP"); - bear_env_t current_env; - bear_capture_env_t(&current_env); - bear_reset_env_t(&initial_env); + char **const original = environ; + char const **const modified = bear_update_environment(original, &initial_env); + environ = (char **)modified; int const result = (*fp)(file, search_path, argv); - bear_reset_env_t(&current_env); - bear_release_env_t(&current_env); + environ = original; + bear_strings_release(modified); return result; } @@ -405,10 +406,7 @@ /* this method is to write log about the process creation. */ -static void bear_report_call(char const *fun, char const *const argv[]) { - static int const GS = 0x1d; - static int const RS = 0x1e; - static int const US = 0x1f; +static void bear_report_call(char const *const argv[]) { if (!initialized) return; @@ -420,26 +418,36 @@ exit(EXIT_FAILURE); } char const * const out_dir = initial_env[0]; + // generate report file path. file name will be "<pid>_<idx>.json" + // it needs to append an index field, since pid is not unique. (many + // compiler wrapper just exec another file, therefore sharing pid.)
size_t const path_max_length = strlen(out_dir) + 32; char filename[path_max_length]; - if (-1 == snprintf(filename, path_max_length, "%s/%d.cmd", out_dir, getpid())) { - perror("bear: snprintf"); - exit(EXIT_FAILURE); + for (int idx = 0; idx < 100; ++idx) { + if (-1 == snprintf(filename, path_max_length, "%s/%d_%d.json", out_dir, getpid(), idx)) { + perror("bear: snprintf"); + exit(EXIT_FAILURE); + } + if (-1 == access(filename, W_OK)) { + break; + } } - FILE * fd = fopen(filename, "a+"); + FILE * fd = fopen(filename, "w+"); if (0 == fd) { perror("bear: fopen"); exit(EXIT_FAILURE); } - fprintf(fd, "%d%c", getpid(), RS); - fprintf(fd, "%d%c", getppid(), RS); - fprintf(fd, "%s%c", fun, RS); - fprintf(fd, "%s%c", cwd, RS); - size_t const argc = bear_strings_length(argv); - for (size_t it = 0; it < argc; ++it) { - fprintf(fd, "%s%c", argv[it], US); + // dump the content in JSON format + fprintf(fd, "{ \"pid\": %d, \"cmd\": [", getpid()); + for (char const *const *it = argv; (it) && (*it); ++it) { + if (it != argv) { + fputc(',', fd); + } + bear_write_json_string(*it, fd); } - fprintf(fd, "%c", GS); + fputs("], \"cwd\": ", fd); + bear_write_json_string(cwd, fd); + fputc('}', fd); if (fclose(fd)) { perror("bear: fclose"); exit(EXIT_FAILURE); @@ -448,6 +456,36 @@ pthread_mutex_unlock(&mutex); } +static void bear_write_json_string(char const *word, FILE *fd) { + fputc('"', fd); + for (char const * it = word; *it; ++it) { + char const current = *it; + switch (current) { + case '\b': + fputs("\\b", fd); + break; + case '\f': + fputs("\\f", fd); + break; + case '\n': + fputs("\\n", fd); + break; + case '\r': + fputs("\\r", fd); + break; + case '\t': + fputs("\\t", fd); + break; + case '"': + case '\\': + fputc('\\', fd); + default: + fputc(current, fd); + } + } + fputc('"', fd); +} + /* update environment assure that chilren processes will copy the desired * behaviour */ @@ -462,18 +500,6 @@ return status; } -static int bear_reset_env_t(bear_env_t *env) { - int status = 1; - 
for (size_t it = 0; it < ENV_SIZE; ++it) { - if ((*env)[it]) { - setenv(env_names[it], (*env)[it], 1); - } else { - unsetenv(env_names[it]); - } - } - return status; -} - static void bear_release_env_t(bear_env_t *env) { for (size_t it = 0; it < ENV_SIZE; ++it) { free((void *)(*env)[it]); @@ -518,14 +544,6 @@ return bear_strings_append(envs, env); } -static char **bear_get_environment() { -#if defined HAVE_NSGETENVIRON - return *_NSGetEnviron(); -#else - return environ; -#endif -} - /* util methods to deal with string arrays. environment and process arguments * are both represented as string arrays. */ Index: tools/scan-build-py/libscanbuild/__init__.py =================================================================== --- tools/scan-build-py/libscanbuild/__init__.py +++ tools/scan-build-py/libscanbuild/__init__.py @@ -3,71 +3,125 @@ # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. -""" -This module responsible to run the Clang static analyzer against any build -and generate reports. -""" +""" This module is a collection of methods commonly used in this project. """ +import collections +import functools +import json +import logging +import os +import os.path +import re +import shlex +import subprocess +import sys +ENVIRONMENT_KEY = 'INTERCEPT_BUILD' -def duplicate_check(method): - """ Predicate to detect duplicated entries. +Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd']) - Unique hash method can be use to detect duplicates. Entries are - represented as dictionaries, which has no default hash method. - This implementation uses a set datatype to store the unique hash values. - This method returns a method which can detect the duplicate values. """ +def shell_split(string): + """ Takes a command string and returns as a list. 
""" - def predicate(entry): - entry_hash = predicate.unique(entry) - if entry_hash not in predicate.state: - predicate.state.add(entry_hash) - return False - return True + def unescape(arg): + """ Gets rid of the escaping characters. """ - predicate.unique = method - predicate.state = set() - return predicate + if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"': + return re.sub(r'\\(["\\])', r'\1', arg[1:-1]) + return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg) - -def tempdir(): - """ Return the default temorary directory. """ - - from os import getenv - return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp'))) + return [unescape(token) for token in shlex.split(string)] -def initialize_logging(verbose_level): - """ Output content controlled by the verbosity level. """ - - import sys - import os.path - import logging +def tempdir(): + """ Return the default temporary directory. """ + + return os.getenv('TMPDIR', os.getenv('TEMP', os.getenv('TMP', '/tmp'))) + + +def run_build(command, *args, **kwargs): + """ Run and report build command execution + + :param command: array of tokens + :return: exit code of the process + """ + environment = kwargs.get('env', os.environ) + logging.debug('run build %s, in environment: %s', command, environment) + exit_code = subprocess.call(command, *args, **kwargs) + logging.debug('build finished with exit code: %d', exit_code) + return exit_code + + +def run_command(command, cwd=None): + """ Run a given command and report the execution. 
+ + :param command: array of tokens + :param cwd: the working directory where the command will be executed + :return: output of the command + """ + def decode_when_needed(result): + """ check_output returns bytes or string depend on python version """ + return result.decode('utf-8') if isinstance(result, bytes) else result + + try: + directory = os.path.abspath(cwd) if cwd else os.getcwd() + logging.debug('exec command %s in %s', command, directory) + output = subprocess.check_output(command, + cwd=directory, + stderr=subprocess.STDOUT) + return decode_when_needed(output).splitlines() + except subprocess.CalledProcessError as ex: + ex.output = decode_when_needed(ex.output).splitlines() + raise ex + + +def reconfigure_logging(verbose_level): + """ Reconfigure logging level and format based on the verbose flag. + + :param verbose_level: number of `-v` flags received by the command + :return: no return value + """ + # exit when nothing to do + if verbose_level == 0: + return + + root = logging.getLogger() + # tune level level = logging.WARNING - min(logging.WARNING, (10 * verbose_level)) - + root.setLevel(level) + # be verbose with messages if verbose_level <= 3: - fmt_string = '{0}: %(levelname)s: %(message)s' + fmt_string = '%(name)s: %(levelname)s: %(message)s' else: - fmt_string = '{0}: %(levelname)s: %(funcName)s: %(message)s' - - program = os.path.basename(sys.argv[0]) - logging.basicConfig(format=fmt_string.format(program), level=level) + fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s' + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter(fmt=fmt_string)) + root.handlers = [handler] def command_entry_point(function): - """ Decorator for command entry points. """ + """ Decorator for command entry methods. + + The decorator initialize/shutdown logging and guard on programming + errors (catch exceptions). 
- import functools - import logging + The decorated method can have arbitrary parameters, the return value will + be the exit code of the process. """ @functools.wraps(function) def wrapper(*args, **kwargs): + """ Do housekeeping tasks and execute the wrapped method. """ - exit_code = 127 try: - exit_code = function(*args, **kwargs) + logging.basicConfig(format='%(name)s: %(message)s', + level=logging.WARNING, + stream=sys.stdout) + # this hack to get the executable name as %(name) + logging.getLogger().name = os.path.basename(sys.argv[0]) + return function(*args, **kwargs) except KeyboardInterrupt: - logging.warning('Keyboard interupt') + logging.warning('Keyboard interrupt') + return 130 # signal received exit code for bash except Exception: logging.exception('Internal error.') if logging.getLogger().isEnabledFor(logging.DEBUG): @@ -75,8 +129,70 @@ "to the bug report") else: logging.error("Please run this command again and turn on " - "verbose mode (add '-vvv' as argument).") + "verbose mode (add '-vvvv' as argument).") + return 64 # some non used exit code for internal errors finally: - return exit_code + logging.shutdown() + + return wrapper + + +def wrapper_entry_point(function): + """ Decorator for wrapper command entry methods. + + The decorator itself execute the real compiler call. Then it calls the + decorated method. The method will receive dictionary of parameters. + + - execution: the command executed by the wrapper. + - result: the exit code of the compilation. + + The return value will be the exit code of the compiler call. (The + decorated method return value is ignored.) + + If the decorated method throws exception, it will be caught and logged. """ + + @functools.wraps(function) + def wrapper(): + """ It executes the compilation and calls the wrapped method. 
""" + + # get relevant parameters from environment + parameters = json.loads(os.environ[ENVIRONMENT_KEY]) + # set logging level when needed + verbose = parameters['verbose'] + reconfigure_logging(verbose) + # find out what is the real compiler (wrapper names encode the + # compiler type. C++ compiler wrappers ends with `c++`, but might + # have `.exe` extension on windows) + wrapper_command = os.path.basename(sys.argv[0]) + is_cxx = re.match(r'(.+)c\+\+(.*)', wrapper_command) + real_compiler = parameters['cxx'] if is_cxx else parameters['cc'] + # execute compilation with the real compiler + command = real_compiler + sys.argv[1:] + logging.debug('compilation: %s', command) + result = subprocess.call(command) + logging.debug('compilation exit code: %d', result) + # call the wrapped method and ignore it's return value ... + try: + call = Execution( + pid=os.getpid(), + cwd=os.getcwd(), + cmd=['c++' if is_cxx else 'cc'] + sys.argv[1:]) + function(execution=call, result=result) + except: + logging.exception('Compiler wrapper failed complete.') + # ... return the real compiler exit code instead. + return result return wrapper + + +def wrapper_environment(args): + """ Set up environment for interpose compiler wrapper.""" + + return { + ENVIRONMENT_KEY: json.dumps({ + 'verbose': args.verbose, + 'cc': shell_split(args.cc), + 'cxx': shell_split(args.cxx) + }) + } Index: tools/scan-build-py/libscanbuild/analyze.py =================================================================== --- tools/scan-build-py/libscanbuild/analyze.py +++ tools/scan-build-py/libscanbuild/analyze.py @@ -11,72 +11,76 @@ -- Analyze: run the analyzer against the captured commands, -- Report: create a cover report from the analyzer outputs. 
""" -import sys import re import os import os.path import json -import argparse import logging -import subprocess import multiprocessing -from libscanbuild import initialize_logging, tempdir, command_entry_point -from libscanbuild.runner import run +import tempfile +import functools +import subprocess +import platform +import contextlib +import datetime + +from libscanbuild import command_entry_point, wrapper_entry_point, \ + wrapper_environment, run_build, run_command +from libscanbuild.arguments import scan, analyze from libscanbuild.intercept import capture -from libscanbuild.report import report_directory, document -from libscanbuild.clang import get_checkers -from libscanbuild.compilation import split_command +from libscanbuild.report import document +from libscanbuild.compilation import Compilation, classify_source, \ + CompilationDatabase +from libscanbuild.clang import get_version, get_arguments -__all__ = ['analyze_build_main', 'analyze_build_wrapper'] +__all__ = ['scan_build', 'analyze_build', 'analyze_build_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' +ENVIRONMENT_KEY = 'ANALYZE_BUILD' @command_entry_point -def analyze_build_main(bin_dir, from_build_command): - """ Entry point for 'analyze-build' and 'scan-build'. 
""" - - parser = create_parser(from_build_command) - args = parser.parse_args() - validate(parser, args, from_build_command) - - # setup logging - initialize_logging(args.verbose) - logging.debug('Parsed arguments: %s', args) - - with report_directory(args.output, args.keep_empty) as target_dir: - if not from_build_command: - # run analyzer only and generate cover report - run_analyzer(args, target_dir) - number_of_bugs = document(args, target_dir, True) - return number_of_bugs if args.status_bugs else 0 - elif args.intercept_first: - # run build command and capture compiler executions - exit_code = capture(args, bin_dir) - # next step to run the analyzer against the captured commands +def scan_build(): + """ Entry point for scan-build command. """ + + args = scan() + # will re-assign the report directory as new output + with report_directory(args.output, args.keep_empty) as args.output: + # run against a build command. there are cases, when analyzer run + # is not required. but we need to set up everything for the + # wrappers, because 'configure' needs to capture the CC/CXX values + # for the Makefile. + if args.intercept_first: + # run build command with intercept module + exit_code, compilations = capture(args) if need_analyzer(args.build): - run_analyzer(args, target_dir) - # cover report generation and bug counting - number_of_bugs = document(args, target_dir, True) - # remove the compilation database when it was not requested - if os.path.exists(args.cdb): - os.unlink(args.cdb) - # set exit status as it was requested - return number_of_bugs if args.status_bugs else exit_code - else: - return exit_code + # run the analyzer against the captured commands + run_analyzer_parallel(compilations, args) else: - # run the build command with compiler wrappers which - # execute the analyzer too. 
(interposition) - environment = setup_environment(args, target_dir, bin_dir) - logging.debug('run build in environment: %s', environment) - exit_code = subprocess.call(args.build, env=environment) - logging.debug('build finished with exit code: %d', exit_code) - # cover report generation and bug counting - number_of_bugs = document(args, target_dir, False) - # set exit status as it was requested - return number_of_bugs if args.status_bugs else exit_code + # run build command and analyzer with compiler wrappers + environment = setup_environment(args) + exit_code = run_build(args.build, env=environment) + # cover report generation and bug counting + number_of_bugs = document(args) + # set exit status as it was requested + return number_of_bugs if args.status_bugs else exit_code + + +@command_entry_point +def analyze_build(): + """ Entry point for analyze-build command. """ + + args = analyze() + # will re-assign the report directory as new output + with report_directory(args.output, args.keep_empty) as args.output: + # run the analyzer against a compilation db + compilations = CompilationDatabase.load(args.cdb) + run_analyzer_parallel(compilations, args) + # cover report generation and bug counting + number_of_bugs = document(args) + # set exit status as it was requested + return number_of_bugs if args.status_bugs else 0 def need_analyzer(args): @@ -85,432 +89,453 @@ When static analyzer run against project configure step, it should be silent and no need to run the analyzer or generate report. - To run `scan-build` against the configure step might be neccessary, + To run `scan-build` against the configure step might be necessary, when compiler wrappers are used. That's the moment when build setup check the compiler and capture the location for the build process. """ return len(args) and not re.search('configure|autogen', args[0]) -def run_analyzer(args, output_dir): - """ Runs the analyzer against the given compilation database. 
""" +def analyze_parameters(args): + """ Mapping between the command line parameters and the analyzer run + method. The run method works with a plain dictionary, while the command + line parameters are in a named tuple. + The keys are very similar, and some values are preprocessed. """ - def exclude(filename): - """ Return true when any excluded directory prefix the filename. """ - return any(re.match(r'^' + directory, filename) - for directory in args.excludes) + def prefix_with(constant, pieces): + """ From a sequence create another sequence where every second element + is from the original sequence and the odd elements are the prefix. + + eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """ + + return [elem for piece in pieces for elem in [constant, piece]] - consts = { + def direct_args(args): + """ A group of command line arguments can mapped to command + line arguments of the analyzer. """ + + result = [] + + if args.store_model: + result.append('-analyzer-store={0}'.format(args.store_model)) + if args.constraints_model: + result.append('-analyzer-constraints={0}'.format( + args.constraints_model)) + if args.internal_stats: + result.append('-analyzer-stats') + if args.analyze_headers: + result.append('-analyzer-opt-analyze-headers') + if args.stats: + result.append('-analyzer-checker=debug.Stats') + if args.maxloop: + result.extend(['-analyzer-max-loop', str(args.maxloop)]) + if args.output_format: + result.append('-analyzer-output={0}'.format(args.output_format)) + if args.analyzer_config: + result.append(args.analyzer_config) + if args.verbose >= 4: + result.append('-analyzer-display-progress') + if args.plugins: + result.extend(prefix_with('-load', args.plugins)) + if args.enable_checker: + checkers = ','.join(args.enable_checker) + result.extend(['-analyzer-checker', checkers]) + if args.disable_checker: + checkers = ','.join(args.disable_checker) + result.extend(['-analyzer-disable-checker', checkers]) + if os.getenv('UBIVIZ'): + 
result.append('-analyzer-viz-egraph-ubigraph') + + return prefix_with('-Xclang', result) + + return { 'clang': args.clang, - 'output_dir': output_dir, + 'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, - 'direct_args': analyzer_params(args), - 'force_debug': args.force_debug + 'direct_args': direct_args(args), + 'force_debug': args.force_debug, + 'excludes': args.excludes } + +def run_analyzer_parallel(compilations, args): + """ Runs the analyzer against the given compilations. """ + logging.debug('run analyzer against compilation database') - with open(args.cdb, 'r') as handle: - generator = (dict(cmd, **consts) - for cmd in json.load(handle) if not exclude(cmd['file'])) - # when verbose output requested execute sequentially - pool = multiprocessing.Pool(1 if args.verbose > 2 else None) - for current in pool.imap_unordered(run, generator): - if current is not None: - # display error message from the static analyzer - for line in current['error_output']: - logging.info(line.rstrip()) - pool.close() - pool.join() - - -def setup_environment(args, destination, bin_dir): + consts = analyze_parameters(args) + parameters = (dict(compilation.to_analyzer(), **consts) + for compilation in compilations) + # when verbose output requested execute sequentially + pool = multiprocessing.Pool(1 if args.verbose > 2 else None) + for current in pool.imap_unordered(run, parameters): + logging_analyzer_output(current) + pool.close() + pool.join() + + +def setup_environment(args): """ Set up environment for build command to interpose compiler wrapper. 
""" environment = dict(os.environ) + # to run compiler wrappers + environment.update(wrapper_environment(args)) environment.update({ - 'CC': os.path.join(bin_dir, COMPILER_WRAPPER_CC), - 'CXX': os.path.join(bin_dir, COMPILER_WRAPPER_CXX), - 'ANALYZE_BUILD_CC': args.cc, - 'ANALYZE_BUILD_CXX': args.cxx, - 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '', - 'ANALYZE_BUILD_VERBOSE': 'DEBUG' if args.verbose > 2 else 'WARNING', - 'ANALYZE_BUILD_REPORT_DIR': destination, - 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, - 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '', - 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), - 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '' + 'CC': COMPILER_WRAPPER_CC, + 'CXX': COMPILER_WRAPPER_CXX }) + # pass the relevant parameters to run the analyzer with condition. + # the presence of the environment value will control the run. + if need_analyzer(args.build): + environment.update({ + ENVIRONMENT_KEY: json.dumps(analyze_parameters(args)) + }) + else: + logging.debug('wrapper should not run analyzer') return environment -def analyze_build_wrapper(cplusplus): +@command_entry_point +@wrapper_entry_point +def analyze_build_wrapper(**kwargs): """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """ - # initialize wrapper logging - logging.basicConfig(format='analyze: %(levelname)s: %(message)s', - level=os.getenv('ANALYZE_BUILD_VERBOSE', 'INFO')) - # execute with real compiler - compiler = os.getenv('ANALYZE_BUILD_CXX', 'c++') if cplusplus \ - else os.getenv('ANALYZE_BUILD_CC', 'cc') - compilation = [compiler] + sys.argv[1:] - logging.info('execute compiler: %s', compilation) - result = subprocess.call(compilation) - # exit when it fails, ... - if result or not os.getenv('ANALYZE_BUILD_CLANG'): - return result - # ... and run the analyzer if all went well. + # don't run analyzer when compilation fails. or when it's not requested. 
+ if kwargs['result'] or not os.getenv(ENVIRONMENT_KEY): + return + # collect the needed parameters from environment + parameters = json.loads(os.environ[ENVIRONMENT_KEY]) + # don't run analyzer when the command is not a compilation. + # (filtering non compilations is done by the generator.) + for entry in Compilation.from_call(kwargs['execution']): + current = dict(entry.to_analyzer(), **parameters) + logging_analyzer_output(run(current)) + + +@contextlib.contextmanager +def report_directory(hint, keep): + """ Responsible for the report directory. + + hint -- could specify the parent directory of the output directory. + keep -- a boolean value to keep or delete the empty report directory. """ + + stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' + stamp = datetime.datetime.now().strftime(stamp_format) + parent_dir = os.path.abspath(hint) + if not os.path.exists(parent_dir): + os.makedirs(parent_dir) + name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) + + logging.info('Report directory created: %s', name) + try: - # check is it a compilation - compilation = split_command(sys.argv) - if compilation is None: - return result - # collect the needed parameters from environment, crash when missing - parameters = { - 'clang': os.getenv('ANALYZE_BUILD_CLANG'), - 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), - 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), - 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), - 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', - '').split(' '), - 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), - 'directory': os.getcwd(), - 'command': [sys.argv[0], '-c'] + compilation.flags - } - # call static analyzer against the compilation - for source in compilation.files: - parameters.update({'file': source}) - logging.debug('analyzer parameters %s', parameters) - current = run(parameters) - # display error message from the static analyzer - if current is not None: - for line in current['error_output']: - 
logging.info(line.rstrip()) + yield name + finally: + if os.listdir(name): + msg = "Run 'scan-view %s' to examine bug reports." + keep = True + else: + if keep: + msg = "Report directory '%s' contains no report, but kept." + else: + msg = "Removing directory '%s' because it contains no report." + logging.warning(msg, name) + + if not keep: + os.rmdir(name) + + +def require(required): + """ Decorator for checking the required values in state. + + It checks the required attributes in the passed state and stop when + any of those is missing. """ + + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + for key in required: + assert key in args[0], '{} is missing'.format(key) + + return function(*args, **kwargs) + + return wrapper + + return decorator + + +@require(['flags', # entry from compilation + 'compiler', # entry from compilation + 'directory', # entry from compilation + 'source', # entry from compilation + 'clang', # clang executable name (and path) + 'direct_args', # arguments from command line + 'excludes', # list of directories + 'force_debug', # kill non debug macros + 'output_dir', # where generated report files shall go + 'output_format', # it's 'plist' or 'html' or both + 'output_failures']) # generate crash reports or not +def run(opts): + """ Entry point to run (or not) static analyzer against a single entry + of the compilation database. + + This complex task is decomposed into smaller methods which are calling + each other in chain. If the analyzis is not possibe the given method + just return and break the chain. + + The passed parameter is a python dictionary. Each method first check + that the needed parameters received. (This is done by the 'require' + decorator. It's like an 'assert' to check the contract between the + caller and the called method.) 
""" + + try: + command = [opts['compiler'], '-c'] + opts['flags'] + [opts['source']] + logging.debug("Run analyzer against '%s'", command) + return exclude(opts) except Exception: - logging.exception("run analyzer inside compiler wrapper failed.") - return result + logging.error("Problem occured during analyzis.", exc_info=1) + return None + + +def logging_analyzer_output(opts): + """ Display error message from analyzer. """ + + if opts and 'error_output' in opts: + for line in opts['error_output']: + logging.info(line) + + +@require(['clang', 'directory', 'flags', 'source', 'output_dir', 'language', + 'error_output', 'exit_code']) +def report_failure(opts): + """ Create report when analyzer failed. + + The major report is the preprocessor output. The output filename generated + randomly. The compiler output also captured into '.stderr.txt' file. + And some more execution context also saved into '.info.txt' file. """ + + def extension(): + """ Generate preprocessor file extension. """ + + mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} + return mapping.get(opts['language'], '.i') + + def destination(): + """ Creates failures directory if not exits yet. """ + + failures_dir = os.path.join(opts['output_dir'], 'failures') + if not os.path.isdir(failures_dir): + os.makedirs(failures_dir) + return failures_dir + + # Classify error type: when Clang terminated by a signal it's a 'Crash'. + # (python subprocess Popen.returncode is negative when child terminated + # by signal.) Everything else is 'Other Error'. + error = 'crash' if opts['exit_code'] < 0 else 'other_error' + # Create preprocessor output file name. (This is blindly following the + # Perl implementation.) + (handle, name) = tempfile.mkstemp(suffix=extension(), + prefix='clang_' + error + '_', + dir=destination()) + os.close(handle) + # Execute Clang again, but run the syntax check only. 
+ cwd = opts['directory'] + cmd = get_arguments( + [opts['clang'], '-fsyntax-only', '-E' + ] + opts['flags'] + [opts['source'], '-o', name], cwd) + run_command(cmd, cwd=cwd) + # write general information about the crash + with open(name + '.info.txt', 'w') as handle: + handle.write(opts['source'] + os.linesep) + handle.write(error.title().replace('_', ' ') + os.linesep) + handle.write(' '.join(cmd) + os.linesep) + handle.write(' '.join(platform.uname()) + os.linesep) + handle.write(get_version(opts['clang'])) + handle.close() + # write the captured output too + with open(name + '.stderr.txt', 'w') as handle: + handle.write(opts['error_output']) + handle.close() + + +@require(['clang', 'directory', 'flags', 'direct_args', 'source', 'output_dir', + 'output_format']) +def run_analyzer(opts, continuation=report_failure): + """ It assembles the analysis command line and executes it. Capture the + output of the analysis and returns with it. If failure reports are + requested, it calls the continuation to generate it. """ + + def target(): + """ Creates output file name for reports. """ + if opts['output_format'] in {'plist', 'plist-html'}: + (handle, name) = tempfile.mkstemp(prefix='report-', + suffix='.plist', + dir=opts['output_dir']) + os.close(handle) + return name + return opts['output_dir'] + try: + cwd = opts['directory'] + cmd = get_arguments([opts['clang'], '--analyze'] + + opts['direct_args'] + opts['flags'] + + [opts['source'], '-o', target()], + cwd) + output = run_command(cmd, cwd=cwd) + return {'error_output': output, 'exit_code': 0} + except subprocess.CalledProcessError as ex: + result = {'error_output': ex.output, 'exit_code': ex.returncode} + if opts.get('output_failures', False): + opts.update(result) + continuation(opts) + return result -def analyzer_params(args): - """ A group of command line arguments can mapped to command - line arguments of the analyzer. This method generates those. 
""" - def prefix_with(constant, pieces): - """ From a sequence create another sequence where every second element - is from the original sequence and the odd elements are the prefix. +@require(['flags', 'force_debug']) +def filter_debug_flags(opts, continuation=run_analyzer): + """ Filter out nondebug macros when requested. """ - eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """ + if opts.pop('force_debug'): + # lazy implementation just append an undefine macro at the end + opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) - return [elem for piece in pieces for elem in [constant, piece]] + return continuation(opts) + + +@require(['language', 'compiler', 'source', 'flags']) +def language_check(opts, continuation=filter_debug_flags): + """ Find out the language from command line parameters or file name + extension. The decision also influenced by the compiler invocation. """ - result = [] - - if args.store_model: - result.append('-analyzer-store={0}'.format(args.store_model)) - if args.constraints_model: - result.append('-analyzer-constraints={0}'.format( - args.constraints_model)) - if args.internal_stats: - result.append('-analyzer-stats') - if args.analyze_headers: - result.append('-analyzer-opt-analyze-headers') - if args.stats: - result.append('-analyzer-checker=debug.Stats') - if args.maxloop: - result.extend(['-analyzer-max-loop', str(args.maxloop)]) - if args.output_format: - result.append('-analyzer-output={0}'.format(args.output_format)) - if args.analyzer_config: - result.append(args.analyzer_config) - if args.verbose >= 4: - result.append('-analyzer-display-progress') - if args.plugins: - result.extend(prefix_with('-load', args.plugins)) - if args.enable_checker: - checkers = ','.join(args.enable_checker) - result.extend(['-analyzer-checker', checkers]) - if args.disable_checker: - checkers = ','.join(args.disable_checker) - result.extend(['-analyzer-disable-checker', checkers]) - if os.getenv('UBIVIZ'): - 
result.append('-analyzer-viz-egraph-ubigraph') - - return prefix_with('-Xclang', result) - - -def print_active_checkers(checkers): - """ Print active checkers to stdout. """ - - for name in sorted(name for name, (_, active) in checkers.items() - if active): - print(name) - - -def print_checkers(checkers): - """ Print verbose checker help to stdout. """ - - print('') - print('available checkers:') - print('') - for name in sorted(checkers.keys()): - description, active = checkers[name] - prefix = '+' if active else ' ' - if len(name) > 30: - print(' {0} {1}'.format(prefix, name)) - print(' ' * 35 + description) + accepted = frozenset({ + 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', + 'c++-cpp-output', 'objective-c-cpp-output' + }) + + # language can be given as a parameter... + language = opts.pop('language') + compiler = opts.pop('compiler') + # ... or find out from source file extension + if language is None and compiler is not None: + language = classify_source(opts['source'], compiler == 'c') + + if language is None: + logging.debug('skip analysis, language not known') + return None + elif language not in accepted: + logging.debug('skip analysis, language not supported') + return None + else: + logging.debug('analysis, language: %s', language) + opts.update({'language': language, + 'flags': ['-x', language] + opts['flags']}) + return continuation(opts) + + +@require(['arch_list', 'flags']) +def arch_check(opts, continuation=language_check): + """ Do run analyzer through one of the given architectures. """ + + disabled = frozenset({'ppc', 'ppc64'}) + + received_list = opts.pop('arch_list') + if received_list: + # filter out disabled architectures and -arch switches + filtered_list = [a for a in received_list if a not in disabled] + if filtered_list: + # There should be only one arch given (or the same multiple + # times). If there are multiple arch are given and are not + # the same, those should not change the pre-processing step. 
+ # But that's the only pass we have before run the analyzer. + current = filtered_list.pop() + logging.debug('analysis, on arch: %s', current) + + opts.update({'flags': ['-arch', current] + opts['flags']}) + return continuation(opts) else: - print(' {0} {1: <30} {2}'.format(prefix, name, description)) - print('') - print('NOTE: "+" indicates that an analysis is enabled by default.') - print('') - - -def validate(parser, args, from_build_command): - """ Validation done by the parser itself, but semantic check still - needs to be done. This method is doing that. """ - - # Make plugins always a list. (It might be None when not specified.) - args.plugins = args.plugins if args.plugins else [] - - if args.help_checkers_verbose: - print_checkers(get_checkers(args.clang, args.plugins)) - parser.exit() - elif args.help_checkers: - print_active_checkers(get_checkers(args.clang, args.plugins)) - parser.exit() - - if from_build_command and not args.build: - parser.error('missing build command') - - -def create_parser(from_build_command): - """ Command line argument parser factory method. """ - - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - parser.add_argument( - '--verbose', '-v', - action='count', - default=0, - help="""Enable verbose output from '%(prog)s'. A second and third - flag increases verbosity.""") - parser.add_argument( - '--override-compiler', - action='store_true', - help="""Always resort to the compiler wrapper even when better - interposition methods are available.""") - parser.add_argument( - '--intercept-first', - action='store_true', - help="""Run the build commands only, build a compilation database, - then run the static analyzer afterwards. - Generally speaking it has better coverage on build commands. - With '--override-compiler' it use compiler wrapper, but does - not run the analyzer till the build is finished. 
""") - parser.add_argument( - '--cdb', - metavar='', - default="compile_commands.json", - help="""The JSON compilation database.""") - - parser.add_argument( - '--output', '-o', - metavar='', - default=tempdir(), - help="""Specifies the output directory for analyzer reports. - Subdirectory will be created if default directory is targeted. - """) - parser.add_argument( - '--status-bugs', - action='store_true', - help="""By default, the exit status of '%(prog)s' is the same as the - executed build command. Specifying this option causes the exit - status of '%(prog)s' to be non zero if it found potential bugs - and zero otherwise.""") - parser.add_argument( - '--html-title', - metavar='', - help="""Specify the title used on generated HTML pages. - If not specified, a default title will be used.""") - parser.add_argument( - '--analyze-headers', - action='store_true', - help="""Also analyze functions in #included files. By default, such - functions are skipped unless they are called by functions - within the main source file.""") - format_group = parser.add_mutually_exclusive_group() - format_group.add_argument( - '--plist', '-plist', - dest='output_format', - const='plist', - default='html', - action='store_const', - help="""This option outputs the results as a set of .plist files.""") - format_group.add_argument( - '--plist-html', '-plist-html', - dest='output_format', - const='plist-html', - default='html', - action='store_const', - help="""This option outputs the results as a set of .html and .plist - files.""") - # TODO: implement '-view ' - - advanced = parser.add_argument_group('advanced options') - advanced.add_argument( - '--keep-empty', - action='store_true', - help="""Don't remove the build results directory even if no issues - were reported.""") - advanced.add_argument( - '--no-failure-reports', '-no-failure-reports', - dest='output_failures', - action='store_false', - help="""Do not create a 'failures' subdirectory that includes analyzer - crash reports and 
preprocessed source files.""") - advanced.add_argument( - '--stats', '-stats', - action='store_true', - help="""Generates visitation statistics for the project being analyzed. - """) - advanced.add_argument( - '--internal-stats', - action='store_true', - help="""Generate internal analyzer statistics.""") - advanced.add_argument( - '--maxloop', '-maxloop', - metavar='<loop count>', - type=int, - help="""Specifiy the number of times a block can be visited before - giving up. Increase for more comprehensive coverage at a cost - of speed.""") - advanced.add_argument( - '--store', '-store', - metavar='<model>', - dest='store_model', - choices=['region', 'basic'], - help="""Specify the store model used by the analyzer. - 'region' specifies a field- sensitive store model. - 'basic' which is far less precise but can more quickly - analyze code. 'basic' was the default store model for - checker-0.221 and earlier.""") - advanced.add_argument( - '--constraints', '-constraints', - metavar='<model>', - dest='constraints_model', - choices=['range', 'basic'], - help="""Specify the contraint engine used by the analyzer. Specifying - 'basic' uses a simpler, less powerful constraint model used by - checker-0.160 and earlier.""") - advanced.add_argument( - '--use-analyzer', - metavar='<path>', - dest='clang', - default='clang', - help="""'%(prog)s' uses the 'clang' executable relative to itself for - static analysis. One can override this behavior with this - option by using the 'clang' packaged with Xcode (on OS X) or - from the PATH.""") - advanced.add_argument( - '--use-cc', - metavar='<path>', - dest='cc', - default='cc', - help="""When '%(prog)s' analyzes a project by interposing a "fake - compiler", which executes a real compiler for compilation and - do other tasks (to run the static analyzer or just record the - compiler invocation). Because of this interposing, '%(prog)s' - does not know what compiler your project normally uses. 
- Instead, it simply overrides the CC environment variable, and - guesses your default compiler. - - If you need '%(prog)s' to use a specific compiler for - *compilation* then you can use this option to specify a path - to that compiler.""") - advanced.add_argument( - '--use-c++', - metavar='<path>', - dest='cxx', - default='c++', - help="""This is the same as "--use-cc" but for C++ code.""") - advanced.add_argument( - '--analyzer-config', '-analyzer-config', - metavar='<options>', - help="""Provide options to pass through to the analyzer's - -analyzer-config flag. Several options are separated with - comma: 'key1=val1,key2=val2' - - Available options: - stable-report-filename=true or false (default) - - Switch the page naming to: - report-<filename>-<function/method name>-<id>.html - instead of report-XXXXXX.html""") - advanced.add_argument( - '--exclude', - metavar='<directory>', - dest='excludes', - action='append', - default=[], - help="""Do not run static analyzer against files found in this - directory. (You can specify this option multiple times.) - Could be usefull when project contains 3rd party libraries. 
- The directory path shall be absolute path as file names in - the compilation database.""") - advanced.add_argument( - '--force-analyze-debug-code', - dest='force_debug', - action='store_true', - help="""Tells analyzer to enable assertions in code even if they were - disabled during compilation, enabling more precise results.""") - - plugins = parser.add_argument_group('checker options') - plugins.add_argument( - '--load-plugin', '-load-plugin', - metavar='<plugin library>', - dest='plugins', - action='append', - help="""Loading external checkers using the clang plugin interface.""") - plugins.add_argument( - '--enable-checker', '-enable-checker', - metavar='<checker name>', - action=AppendCommaSeparated, - help="""Enable specific checker.""") - plugins.add_argument( - '--disable-checker', '-disable-checker', - metavar='<checker name>', - action=AppendCommaSeparated, - help="""Disable specific checker.""") - plugins.add_argument( - '--help-checkers', - action='store_true', - help="""A default group of checkers is run unless explicitly disabled. - Exactly which checkers constitute the default group is a - function of the operating system in use. These can be printed - with this flag.""") - plugins.add_argument( - '--help-checkers-verbose', - action='store_true', - help="""Print all available checkers and mark the enabled ones.""") - - if from_build_command: - parser.add_argument( - dest='build', - nargs=argparse.REMAINDER, - help="""Command to run.""") - - return parser - - -class AppendCommaSeparated(argparse.Action): - """ argparse Action class to support multiple comma separated lists. 
""" - - def __call__(self, __parser, namespace, values, __option_string): - # getattr(obj, attr, default) does not really returns default but none - if getattr(namespace, self.dest, None) is None: - setattr(namespace, self.dest, []) - # once it's fixed we can use as expected - actual = getattr(namespace, self.dest) - actual.extend(values.split(',')) - setattr(namespace, self.dest, actual) + logging.debug('skip analysis, found not supported arch') + return None + else: + logging.debug('analysis, on default arch') + return continuation(opts) + +# To have good results from static analyzer certain compiler options shall be +# omitted. The compiler flag filtering only affects the static analyzer run. +# +# Keys are the option name, value number of options to skip +IGNORED_FLAGS = { + '-c': 0, # compile option will be overwritten + '-fsyntax-only': 0, # static analyzer option will be overwritten + '-o': 1, # will set up own output file + # flags below are inherited from the perl implementation. + '-g': 0, + '-save-temps': 0, + '-install_name': 1, + '-exported_symbols_list': 1, + '-current_version': 1, + '-compatibility_version': 1, + '-init': 1, + '-e': 1, + '-seg1addr': 1, + '-bundle_loader': 1, + '-multiply_defined': 1, + '-sectorder': 3, + '--param': 1, + '--serialize-diagnostics': 1 +} + + +@require(['flags']) +def classify_parameters(opts, continuation=arch_check): + """ Prepare compiler flags (filters some and add others) and take out + language (-x) and architecture (-arch) flags for future processing. 
""" + + # the result of the method + result = { + 'flags': [], # the filtered compiler flags + 'arch_list': [], # list of architecture flags + 'language': None, # compilation language, None, if not specified + } + + # iterate on the compile options + args = iter(opts['flags']) + for arg in args: + # take arch flags into a separate basket + if arg == '-arch': + result['arch_list'].append(next(args)) + # take language + elif arg == '-x': + result['language'] = next(args) + # ignore some flags + elif arg in IGNORED_FLAGS: + count = IGNORED_FLAGS[arg] + for _ in range(count): + next(args) + # we don't care about extra warnings, but we should suppress ones + # that we don't want to see. + elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): + pass + # and consider everything else as compilation flag. + else: + result['flags'].append(arg) + + opts.update(result) + return continuation(opts) + + +@require(['source', 'excludes']) +def exclude(opts, continuation=classify_parameters): + """ Analysis might be skipped, when one of the requested excluded + directory contains the file. """ + + def contains(directory, entry): + # When a directory contains a file, then the relative path to the + # file from that directory does not start with a parent dir prefix. + relative = os.path.relpath(entry, directory).split(os.sep) + return len(relative) and relative[0] != os.pardir + + if any(contains(dir, opts['source']) for dir in opts['excludes']): + logging.debug('skip analysis, file requested to exclude') + return None + else: + return continuation(opts) Index: tools/scan-build-py/libscanbuild/arguments.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/arguments.py @@ -0,0 +1,410 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+""" The module implements command line interface related duties. + +It uses argparse module to create the command line parser. (This library is +in the standard python library since 3.2 and backported to 2.7, but not +earlier.) + +It also implements basic validation methods, related to the command. +Validations are mostly calling specific help methods, or mangling values. +""" + +import os +import sys +import argparse +import logging +from libscanbuild import reconfigure_logging, tempdir +from libscanbuild.clang import get_checkers + +__all__ = ['intercept', 'analyze', 'scan'] + + +def intercept(): + """ Parse and validate command line arguments. """ + + parser = intercept_parser() + args = parser.parse_args() + + reconfigure_logging(args.verbose) + logging.debug('Raw arguments %s', sys.argv) + + # short validation logic + if not args.build: + parser.error(message='missing build command') + + logging.debug('Parsed arguments: %s', args) + return args + + +def analyze(): + """ Parse and validate command line arguments. """ + + from_build_command = False + parser = analyze_parser(from_build_command) + args = parser.parse_args() + + reconfigure_logging(args.verbose) + logging.debug('Raw arguments %s', sys.argv) + + analyze_validate(parser, args, from_build_command) + logging.debug('Parsed arguments: %s', args) + return args + + +def scan(): + """ Parse and validate command line arguments. """ + + from_build_command = True + parser = analyze_parser(from_build_command) + args = parser.parse_args() + + reconfigure_logging(args.verbose) + logging.debug('Raw arguments %s', sys.argv) + + analyze_validate(parser, args, from_build_command) + logging.debug('Parsed arguments: %s', args) + return args + + +def analyze_validate(parser, args, from_build_command): + """ Validation done by the parser itself, but semantic check still + needs to be done. This method is doing it for analyze related commands.""" + + # Make plugins always a list. (It might be None when not specified.) 
+ args.plugins = args.plugins if args.plugins else [] + # Make sure that these checks are bellow this ^ + if args.help_checkers_verbose: + print_checkers(get_checkers(args.clang, args.plugins)) + parser.exit(status=0) + elif args.help_checkers: + print_active_checkers(get_checkers(args.clang, args.plugins)) + parser.exit(status=0) + elif from_build_command and not args.build: + parser.error(message='missing build command') + elif not from_build_command and not os.path.exists(args.cdb): + parser.error(message='compilation database is missing') + + # Make exclude directory list unique and absolute + uniq_excludes = set(os.path.abspath(entry) for entry in args.excludes) + args.excludes = list(uniq_excludes) + + # because shared codes for all tools, some common used methods are + # expecting some argument to be present. so, instead of query the args + # object about the presence of the flag, we fake it here. to make those + # methods more readable. (it's an arguable choice, took it only for those + # which have good default value.) + if from_build_command: + # add cdb parameter invisibly to make report module working + args.cdb = 'compile_commands.json' + + +def intercept_parser(): + """ Command line argument parser factory method. """ + + parser = parser_create() + parser_add_cdb(parser) + + parser_add_prefer_wrapper(parser) + parser_add_compilers(parser) + + advanced = parser.add_argument_group('advanced options') + group = advanced.add_mutually_exclusive_group() + group.add_argument( + '--append', + action='store_true', + help="""Extend existing compilation database with new entries. + Duplicate entries are detected and not present in the final output. + The output is not continuously updated, it's done when the build + command finished. """) + + parser.add_argument( + dest='build', nargs=argparse.REMAINDER, help="""Command to run.""") + return parser + + +def analyze_parser(from_build_command): + """ Command line argument parser factory method. 
""" + + parser = parser_create() + + if from_build_command: + parser_add_prefer_wrapper(parser) + parser_add_compilers(parser) + + parser.add_argument( + '--intercept-first', + action='store_true', + help="""Run the build commands first, intercept compiler + calls and then run the static analyzer afterwards. + Generally speaking it has better coverage on build commands. + With '--override-compiler' it use compiler wrapper, but does + not run the analyzer till the build is finished.""") + else: + parser_add_cdb(parser) + + parser.add_argument( + '--status-bugs', + action='store_true', + help="""The exit status of '%(prog)s' is the same as the executed + build command. This option ignores the build exit status and sets to + be non zero if it found potential bugs or zero otherwise.""") + parser.add_argument( + '--exclude', + metavar='<directory>', + dest='excludes', + action='append', + default=[], + help="""Do not run static analyzer against files found in this + directory. (You can specify this option multiple times.) + Could be useful when project contains 3rd party libraries.""") + + output = parser.add_argument_group('output control options') + output.add_argument( + '--output', + '-o', + metavar='<path>', + default=tempdir(), + help="""Specifies the output directory for analyzer reports. + Subdirectory will be created if default directory is targeted.""") + output.add_argument( + '--keep-empty', + action='store_true', + help="""Don't remove the build results directory even if no issues + were reported.""") + output.add_argument( + '--html-title', + metavar='<title>', + help="""Specify the title used on generated HTML pages. 
+ If not specified, a default title will be used.""") + format_group = output.add_mutually_exclusive_group() + format_group.add_argument( + '--plist', + '-plist', + dest='output_format', + const='plist', + default='html', + action='store_const', + help="""Cause the results as a set of .plist files.""") + format_group.add_argument( + '--plist-html', + '-plist-html', + dest='output_format', + const='plist-html', + default='html', + action='store_const', + help="""Cause the results as a set of .html and .plist files.""") + # TODO: implement '-view ' + + advanced = parser.add_argument_group('advanced options') + advanced.add_argument( + '--use-analyzer', + metavar='<path>', + dest='clang', + default='clang', + help="""'%(prog)s' uses the 'clang' executable relative to itself for + static analysis. One can override this behavior with this option by + using the 'clang' packaged with Xcode (on OS X) or from the PATH.""") + advanced.add_argument( + '--no-failure-reports', + '-no-failure-reports', + dest='output_failures', + action='store_false', + help="""Do not create a 'failures' subdirectory that includes analyzer + crash reports and preprocessed source files.""") + parser.add_argument( + '--analyze-headers', + action='store_true', + help="""Also analyze functions in #included files. By default, such + functions are skipped unless they are called by functions within the + main source file.""") + advanced.add_argument( + '--stats', + '-stats', + action='store_true', + help="""Generates visitation statistics for the project.""") + advanced.add_argument( + '--internal-stats', + action='store_true', + help="""Generate internal analyzer statistics.""") + advanced.add_argument( + '--maxloop', + '-maxloop', + metavar='<loop count>', + type=int, + help="""Specifiy the number of times a block can be visited before + giving up. 
Increase for more comprehensive coverage at a cost of + speed.""") + advanced.add_argument( + '--store', + '-store', + metavar='<model>', + dest='store_model', + choices=['region', 'basic'], + help="""Specify the store model used by the analyzer. 'region' + specifies a field- sensitive store model. 'basic' which is far less + precise but can more quickly analyze code. 'basic' was the default + store model for checker-0.221 and earlier.""") + advanced.add_argument( + '--constraints', + '-constraints', + metavar='<model>', + dest='constraints_model', + choices=['range', 'basic'], + help="""Specify the constraint engine used by the analyzer. Specifying + 'basic' uses a simpler, less powerful constraint model used by + checker-0.160 and earlier.""") + advanced.add_argument( + '--analyzer-config', + '-analyzer-config', + metavar='<options>', + help="""Provide options to pass through to the analyzer's + -analyzer-config flag. Several options are separated with comma: + 'key1=val1,key2=val2' + + Available options: + stable-report-filename=true or false (default) + + Switch the page naming to: + report-<filename>-<function/method name>-<id>.html + instead of report-XXXXXX.html""") + advanced.add_argument( + '--force-analyze-debug-code', + dest='force_debug', + action='store_true', + help="""Tells analyzer to enable assertions in code even if they were + disabled during compilation, enabling more precise results.""") + + plugins = parser.add_argument_group('checker options') + plugins.add_argument( + '--load-plugin', + '-load-plugin', + metavar='<plugin library>', + dest='plugins', + action='append', + help="""Loading external checkers using the clang plugin interface.""") + plugins.add_argument( + '--enable-checker', + '-enable-checker', + metavar='<checker name>', + action=AppendCommaSeparated, + help="""Enable specific checker.""") + plugins.add_argument( + '--disable-checker', + '-disable-checker', + metavar='<checker name>', + action=AppendCommaSeparated, + 
help="""Disable specific checker.""") + plugins.add_argument( + '--help-checkers', + action='store_true', + help="""A default group of checkers is run unless explicitly disabled. + Exactly which checkers constitute the default group is a function of + the operating system in use. These can be printed with this flag.""") + plugins.add_argument( + '--help-checkers-verbose', + action='store_true', + help="""Print all available checkers and mark the enabled ones.""") + + if from_build_command: + parser.add_argument( + dest='build', nargs=argparse.REMAINDER, help="""Command to run.""") + return parser + + +def parser_create(): + """ Command line argument parser factory method. """ + + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument( + '--verbose', + '-v', + action='count', + default=0, + help="""Enable verbose output from '%(prog)s'. A second, third and + fourth flags increases verbosity.""") + return parser + + +def parser_add_cdb(parser): + parser.add_argument( + '--cdb', + metavar='<file>', + default="compile_commands.json", + help="""The JSON compilation database.""") + + +def parser_add_prefer_wrapper(parser): + parser.add_argument( + '--override-compiler', + action='store_true', + help="""Always resort to the compiler wrapper even when better + intercept methods are available.""") + + +def parser_add_compilers(parser): + parser.add_argument( + '--use-cc', + metavar='<path>', + dest='cc', + default=os.getenv('CC', 'cc'), + help="""When '%(prog)s' analyzes a project by interposing a compiler + wrapper, which executes a real compiler for compilation and do other + tasks (record the compiler invocation). Because of this interposing, + '%(prog)s' does not know what compiler your project normally uses. + Instead, it simply overrides the CC environment variable, and guesses + your default compiler. 
+ + If you need '%(prog)s' to use a specific compiler for *compilation* + then you can use this option to specify a path to that compiler.""") + parser.add_argument( + '--use-c++', + metavar='<path>', + dest='cxx', + default=os.getenv('CXX', 'c++'), + help="""This is the same as "--use-cc" but for C++ code.""") + + +class AppendCommaSeparated(argparse.Action): + """ argparse Action class to support multiple comma separated lists. """ + + def __call__(self, __parser, namespace, values, __option_string): + # getattr(obj, attr, default) does not really returns default but none + if getattr(namespace, self.dest, None) is None: + setattr(namespace, self.dest, []) + # once it's fixed we can use as expected + actual = getattr(namespace, self.dest) + actual.extend(values.split(',')) + setattr(namespace, self.dest, actual) + + +def print_active_checkers(checkers): + """ Print active checkers to stdout. """ + + for name in sorted(name for name, (_, active) in checkers.items() + if active): + print(name) + + +def print_checkers(checkers): + """ Print verbose checker help to stdout. """ + + print('') + print('available checkers:') + print('') + for name in sorted(checkers.keys()): + description, active = checkers[name] + prefix = '+' if active else ' ' + if len(name) > 30: + print(' {0} {1}'.format(prefix, name)) + print(' ' * 35 + description) + else: + print(' {0} {1: <30} {2}'.format(prefix, name, description)) + print('') + print('NOTE: "+" indicates that an analysis is enabled by default.') + print('') Index: tools/scan-build-py/libscanbuild/clang.py =================================================================== --- tools/scan-build-py/libscanbuild/clang.py +++ tools/scan-build-py/libscanbuild/clang.py @@ -9,9 +9,7 @@ a subset of that, it makes sense to create a function specific wrapper. 
""" import re -import subprocess -import logging -from libscanbuild.shell import decode +from libscanbuild import shell_split, run_command __all__ = ['get_version', 'get_arguments', 'get_checkers'] @@ -25,8 +23,9 @@ :param clang: the compiler we are using :return: the version string printed to stderr """ - output = subprocess.check_output([clang, '-v'], stderr=subprocess.STDOUT) - return output.decode('utf-8').splitlines()[0] + output = run_command([clang, '-v']) + # the relevant version info is in the first line + return output[0] def get_arguments(command, cwd): @@ -38,15 +37,14 @@ cmd = command[:] cmd.insert(1, '-###') - logging.debug('exec command in %s: %s', cwd, ' '.join(cmd)) - output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT) + output = run_command(cmd, cwd=cwd) # The relevant information is in the last line of the output. # Don't check if finding last line fails, would throw exception anyway. - last_line = output.decode('utf-8').splitlines()[-1] + last_line = output[-1] if re.search(r'clang(.*): error:', last_line): raise Exception(last_line) - return decode(last_line) + return shell_split(last_line) def get_active_checkers(clang, plugins): @@ -141,9 +139,7 @@ load = [elem for plugin in plugins for elem in ['-load', plugin]] cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help'] - logging.debug('exec command: %s', ' '.join(cmd)) - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - lines = output.decode('utf-8').splitlines() + lines = run_command(cmd) is_active_checker = is_active(get_active_checkers(clang, plugins)) Index: tools/scan-build-py/libscanbuild/compilation.py =================================================================== --- tools/scan-build-py/libscanbuild/compilation.py +++ tools/scan-build-py/libscanbuild/compilation.py @@ -8,11 +8,14 @@ import re import os import collections +import logging +import json +from libscanbuild import Execution, shell_split -__all__ = ['split_command', 
'classify_source', 'compiler_language'] +__all__ = ['classify_source', 'Compilation', 'CompilationDatabase'] # Ignored compiler options map for compilation database creation. -# The map is used in `split_command` method. (Which does ignore and classify +# The map is used in `_split_command` method. (Which does ignore and classify # parameters.) Please note, that these are not the only parameters which # might be ignored. # @@ -48,61 +51,225 @@ '-Xlinker': 1 } -# Known C/C++ compiler executable name patterns -COMPILER_PATTERNS = frozenset([ - re.compile(r'^(intercept-|analyze-|)c(c|\+\+)$'), - re.compile(r'^([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$'), - re.compile(r'^([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'), - re.compile(r'^llvm-g(cc|\+\+)$'), +# Known C/C++ compiler wrapper name patterns +COMPILER_PATTERN_WRAPPER = re.compile(r'^(distcc|ccache)$') + +# Known C compiler executable name patterns +COMPILER_PATTERNS_CC = frozenset([ + re.compile(r'^(|i|mpi)cc$'), + re.compile(r'^([^-]*-)*[mg]cc(-\d+(\.\d+){0,2})?$'), + re.compile(r'^([^-]*-)*clang(-\d+(\.\d+){0,2})?$'), + re.compile(r'^(g|)xlc$'), +]) + +# Known C++ compiler executable name patterns +COMPILER_PATTERNS_CXX = frozenset([ + re.compile(r'^(c\+\+|cxx|CC)$'), + re.compile(r'^([^-]*-)*[mg]\+\+(-\d+(\.\d+){0,2})?$'), + re.compile(r'^([^-]*-)*clang\+\+(-\d+(\.\d+){0,2})?$'), + re.compile(r'^(icpc|mpiCC|mpicxx|mpic\+\+)$'), + re.compile(r'^(g|)xl(C|c\+\+)$'), ]) +CompilationCommand = collections.namedtuple( + 'CompilationCommand', ['compiler', 'flags', 'files']) + + +class Compilation: + def __init__(self, compiler, flags, source, directory): + """ Constructor for a single compilation. + + This method just normalize the paths and store the values. 
""" + + self.compiler = compiler + self.flags = flags + self.directory = os.path.normpath(directory) + self.source = source if os.path.isabs(source) else \ + os.path.normpath(os.path.join(self.directory, source)) + + def _hash_str(self): + """ Generate unique hash string for compilation entry. + + Python requires __hash__ and __eq__ methods implemented in order to + store the object in a set. We use the set to filter out duplicate + entries from compilation database. + + :return: a unique hash string. """ + + return ':'.join([ + self.source[::-1], # for faster lookup it's reverted + self.directory[::-1], # for faster lookup it's reverted + ' '.join(self.flags), # just concat, don't escape it + self.compiler + ]) + + def __hash__(self): + """ See comment for _hash_str method. """ + + return hash(self._hash_str()) + + def __eq__(self, other): + """ See comment for _hash_str method. """ + + return isinstance(other, Compilation) and \ + self._hash_str() == other._hash_str() + + def to_analyzer(self): + """ This method dumps the object attributes into a dictionary. """ + + return dict((key, value) for key, value in vars(self).items()) + + def to_db(self): + """ This method creates a compilation database entry. """ + + relative = os.path.relpath(self.source, self.directory) + compiler = 'cc' if self.compiler == 'c' else 'c++' + return { + 'file': relative, + 'arguments': [compiler, '-c'] + self.flags + [relative], + 'directory': self.directory + } + + @staticmethod + def from_call(execution, cc='cc', cxx='c++'): + """ Generator method for compilation entries. + + From a single compiler call it can generate zero or more entries. -def split_command(command): - """ Returns a value when the command is a compilation, None otherwise. 
+ :param execution: executed command and working directory + :param cc: user specified C compiler name + :param cxx: user specified C++ compiler name + :return: stream of CompilationDbEntry objects """ - The value on success is a named tuple with the following attributes: + candidate = Compilation._split_command(execution.cmd, cc, cxx) + for source in (candidate.files if candidate else []): + result = Compilation(directory=execution.cwd, + source=source, + compiler=candidate.compiler, + flags=candidate.flags) + if os.path.isfile(result.source): + yield result - files: list of source files - flags: list of compile options - compiler: string value of 'c' or 'c++' """ + @staticmethod + def from_db(entry): + """ Factory method for compilation entry. - # the result of this method - result = collections.namedtuple('Compilation', - ['compiler', 'flags', 'files']) - result.compiler = compiler_language(command) - result.flags = [] - result.files = [] - # quit right now, if the program was not a C/C++ compiler - if not result.compiler: + From compilation database entry it creates the compilation object. + + :param entry: the compilation database entry + :return: a single compilation object """ + + command = shell_split(entry['command']) if 'command' in entry else \ + entry['arguments'] + execution = Execution(cmd=command, cwd=entry['directory'], pid=0) + entries = list(Compilation.from_call(execution)) + assert len(entries) == 1 + return entries[0] + + @staticmethod + def _split_compiler(command, cc, cxx): + """ A predicate to decide the command is a compiler call or not. 
+ + :param command: the command to classify + :param cc: user specified C compiler name + :param cxx: user specified C++ compiler name + :return: None if the command is not a compilation, or a tuple + (compiler_language, rest of the command) otherwise """ + + def is_wrapper(cmd): + return True if COMPILER_PATTERN_WRAPPER.match(cmd) else False + + def is_c_compiler(cmd): + return os.path.basename(cc) == cmd or \ + any(pattern.match(cmd) for pattern in COMPILER_PATTERNS_CC) + + def is_cxx_compiler(cmd): + return os.path.basename(cxx) == cmd or \ + any(pattern.match(cmd) for pattern in COMPILER_PATTERNS_CXX) + + if command: # not empty list will allow to index '0' and '1:' + executable = os.path.basename(command[0]) + parameters = command[1:] + # 'wrapper' 'parameters' and + # 'wrapper' 'compiler' 'parameters' are valid. + # plus, a wrapper can wrap wrapper too. + if is_wrapper(executable): + result = Compilation._split_compiler(parameters, cc, cxx) + return ('c', parameters) if result is None else result + # and 'compiler' 'parameters' is valid. + elif is_c_compiler(executable): + return 'c', parameters + elif is_cxx_compiler(executable): + return 'c++', parameters return None - # iterate on the compile options - args = iter(command[1:]) - for arg in args: - # quit when compilation pass is not involved - if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}: + + @staticmethod + def _split_command(command, cc, cxx): + """ Returns a value when the command is a compilation, None otherwise. 
+ + :param command: the command to classify + :param cc: user specified C compiler name + :param cxx: user specified C++ compiler name + :return: stream of CompilationCommand objects """ + + logging.debug('input was: %s', command) + # quit right now, if the program was not a C/C++ compiler + compiler_and_arguments = Compilation._split_compiler(command, cc, cxx) + if compiler_and_arguments is None: return None - # ignore some flags - elif arg in IGNORED_FLAGS: - count = IGNORED_FLAGS[arg] - for _ in range(count): - next(args) - elif re.match(r'^-(l|L|Wl,).+', arg): - pass - # some parameters could look like filename, take as compile option - elif arg in {'-D', '-I'}: - result.flags.extend([arg, next(args)]) - # parameter which looks source file is taken... - elif re.match(r'^[^-].+', arg) and classify_source(arg): - result.files.append(arg) - # and consider everything else as compile option. - else: - result.flags.append(arg) - # do extra check on number of source files - return result if result.files else None + + # the result of this method + result = CompilationCommand(compiler=compiler_and_arguments[0], + flags=[], + files=[]) + # iterate on the compile options + args = iter(compiler_and_arguments[1]) + for arg in args: + # quit when compilation pass is not involved + if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}: + return None + # ignore some flags + elif arg in IGNORED_FLAGS: + count = IGNORED_FLAGS[arg] + for _ in range(count): + next(args) + elif re.match(r'^-(l|L|Wl,).+', arg): + pass + # some parameters could look like filename, take as compile option + elif arg in {'-D', '-I'}: + result.flags.extend([arg, next(args)]) + # parameter which looks source file is taken... + elif re.match(r'^[^-].+', arg) and classify_source(arg): + result.files.append(arg) + # and consider everything else as compile option. 
+ else: + result.flags.append(arg) + logging.debug('output is: %s', result) + # do extra check on number of source files + return result if result.files else None + + +class CompilationDatabase: + @staticmethod + def save(filename, iterator): + entries = [entry.to_db() for entry in iterator] + with open(filename, 'w+') as handle: + json.dump(entries, handle, sort_keys=True, indent=4) + + @staticmethod + def load(filename): + with open(filename, 'r') as handle: + for entry in json.load(handle): + yield Compilation.from_db(entry) def classify_source(filename, c_compiler=True): - """ Return the language from file name extension. """ + """ Classify source file names and returns the presumed language, + based on the file name extension. + + :param filename: the source file name + :param c_compiler: indicate that the compiler is a C compiler, + :return: the language from file name extension. """ mapping = { '.c': 'c' if c_compiler else 'c++', @@ -125,17 +292,3 @@ __, extension = os.path.splitext(os.path.basename(filename)) return mapping.get(extension) - - -def compiler_language(command): - """ A predicate to decide the command is a compiler call or not. - - Returns 'c' or 'c++' when it match. None otherwise. """ - - cplusplus = re.compile(r'^(.+)(\+\+)(-.+|)$') - - if command: - executable = os.path.basename(command[0]) - if any(pattern.match(executable) for pattern in COMPILER_PATTERNS): - return 'c++' if cplusplus.match(executable) else 'c' - return None Index: tools/scan-build-py/libscanbuild/intercept.py =================================================================== --- tools/scan-build-py/libscanbuild/intercept.py +++ tools/scan-build-py/libscanbuild/intercept.py @@ -20,215 +20,183 @@ The module implements the build command execution and the post-processing of the output files, which will condensates into a compilation database. 
""" -import sys -import os -import os.path -import re import itertools import json -import glob -import argparse import logging -import subprocess -from libear import build_libear, TemporaryDirectory -from libscanbuild import command_entry_point -from libscanbuild import duplicate_check, tempdir, initialize_logging -from libscanbuild.compilation import split_command -from libscanbuild.shell import encode, decode +import os +import os.path +import re +import sys +import uuid -__all__ = ['capture', 'intercept_build_main', 'intercept_build_wrapper'] +from libear import build_libear, temporary_directory +from libscanbuild import tempdir, command_entry_point, wrapper_entry_point, \ + wrapper_environment, run_build, run_command, Execution +from libscanbuild.arguments import intercept +from libscanbuild.compilation import Compilation, CompilationDatabase -GS = chr(0x1d) -RS = chr(0x1e) -US = chr(0x1f) +__all__ = ['capture', 'intercept_build_main', 'intercept_build_wrapper'] COMPILER_WRAPPER_CC = 'intercept-cc' COMPILER_WRAPPER_CXX = 'intercept-c++' +TRACE_FILE_EXTENSION = '.json' # same as in ear.c +WRAPPER_ONLY_PLATFORMS = frozenset({'win32', 'cygwin'}) @command_entry_point -def intercept_build_main(bin_dir): +def intercept_build_main(): """ Entry point for 'intercept-build' command. """ - parser = create_parser() - args = parser.parse_args() - - initialize_logging(args.verbose) - logging.debug('Parsed arguments: %s', args) + args = intercept() + exit_code, current = capture(args) - if not args.build: - parser.print_help() - return 0 - - return capture(args, bin_dir) + # To support incremental builds, it is desired to read elements from + # an existing compilation database from a previous run. 
+ if args.append and os.path.isfile(args.cdb): + previous = CompilationDatabase.load(args.cdb) + entries = iter(set(itertools.chain(previous, current))) + CompilationDatabase.save(args.cdb, entries) + else: + CompilationDatabase.save(args.cdb, current) + return exit_code -def capture(args, bin_dir): - """ The entry point of build command interception. """ - def post_processing(commands): - """ To make a compilation database, it needs to filter out commands - which are not compiler calls. Needs to find the source file name - from the arguments. And do shell escaping on the command. +def capture(args): + """ Implementation of compilation database generation. - To support incremental builds, it is desired to read elements from - an existing compilation database from a previous run. These elements - shall be merged with the new elements. """ + :param args: the parsed and validated command line arguments + :return: the exit status of build process. """ - # create entries from the current run - current = itertools.chain.from_iterable( - # creates a sequence of entry generators from an exec, - format_entry(command) for command in commands) - # read entries from previous run - if 'append' in args and args.append and os.path.isfile(args.cdb): - with open(args.cdb) as handle: - previous = iter(json.load(handle)) - else: - previous = iter([]) - # filter out duplicate entries from both - duplicate = duplicate_check(entry_hash) - return (entry - for entry in itertools.chain(previous, current) - if os.path.exists(entry['file']) and not duplicate(entry)) - - with TemporaryDirectory(prefix='intercept-', dir=tempdir()) as tmp_dir: + with temporary_directory(prefix='intercept-', dir=tempdir()) as tmp_dir: # run the build command - environment = setup_environment(args, tmp_dir, bin_dir) - logging.debug('run build in environment: %s', environment) - exit_code = subprocess.call(args.build, env=environment) - logging.info('build finished with exit code: %d', exit_code) + environment = 
setup_environment(args, tmp_dir) + exit_code = run_build(args.build, env=environment) # read the intercepted exec calls - exec_traces = itertools.chain.from_iterable( - parse_exec_trace(os.path.join(tmp_dir, filename)) - for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd')))) - # do post processing only if that was requested - if 'raw_entries' not in args or not args.raw_entries: - entries = post_processing(exec_traces) - else: - entries = exec_traces - # dump the compilation database - with open(args.cdb, 'w+') as handle: - json.dump(list(entries), handle, sort_keys=True, indent=4) - return exit_code + calls = (parse_exec_trace(file) for file in exec_trace_files(tmp_dir)) + current = compilations(calls, args.cc, args.cxx) + + return exit_code, iter(set(current)) -def setup_environment(args, destination, bin_dir): +def compilations(exec_calls, cc, cxx): + """ Needs to filter out commands which are not compiler calls. And those + compiler calls shall be compilation (not pre-processing or linking) calls. + Plus needs to find the source file name from the arguments. + + :param exec_calls: iterator of executions + :param cc: user specified C compiler name + :param cxx: user specified C++ compiler name + :return: stream of formatted compilation database entries """ + + for call in exec_calls: + for entry in Compilation.from_call(call, cc, cxx): + yield entry + + +def setup_environment(args, destination): """ Sets up the environment for the build command. - It sets the required environment variables and execute the given command. - The exec calls will be logged by the 'libear' preloaded library or by the - 'wrapper' programs. """ + In order to capture the sub-commands (executed by the build process), + it needs to prepare the environment. It's either the compiler wrappers + shall be announce as compiler or the intercepting library shall be + announced for the dynamic linker. 
- c_compiler = args.cc if 'cc' in args else 'cc' - cxx_compiler = args.cxx if 'cxx' in args else 'c++' + :param args: command line arguments + :param destination: directory path for the execution trace files + :return: a prepared set of environment variables. """ - libear_path = None if args.override_compiler or is_preload_disabled( - sys.platform) else build_libear(c_compiler, destination) + use_wrapper = args.override_compiler or is_preload_disabled(sys.platform) environment = dict(os.environ) environment.update({'INTERCEPT_BUILD_TARGET_DIR': destination}) - if not libear_path: - logging.debug('intercept gonna use compiler wrappers') + if use_wrapper: + environment.update(wrapper_environment(args)) environment.update({ - 'CC': os.path.join(bin_dir, COMPILER_WRAPPER_CC), - 'CXX': os.path.join(bin_dir, COMPILER_WRAPPER_CXX), - 'INTERCEPT_BUILD_CC': c_compiler, - 'INTERCEPT_BUILD_CXX': cxx_compiler, - 'INTERCEPT_BUILD_VERBOSE': 'DEBUG' if args.verbose > 2 else 'INFO' - }) - elif sys.platform == 'darwin': - logging.debug('intercept gonna preload libear on OSX') - environment.update({ - 'DYLD_INSERT_LIBRARIES': libear_path, - 'DYLD_FORCE_FLAT_NAMESPACE': '1' + 'CC': COMPILER_WRAPPER_CC, + 'CXX': COMPILER_WRAPPER_CXX, }) else: - logging.debug('intercept gonna preload libear on UNIX') - environment.update({'LD_PRELOAD': libear_path}) + intercept_library = build_libear(args.cc, destination) + if sys.platform == 'darwin': + environment.update({ + 'DYLD_INSERT_LIBRARIES': intercept_library, + 'DYLD_FORCE_FLAT_NAMESPACE': '1' + }) + else: + environment.update({'LD_PRELOAD': intercept_library}) return environment -def intercept_build_wrapper(cplusplus): +@command_entry_point +@wrapper_entry_point +def intercept_build_wrapper(**kwargs): """ Entry point for `intercept-cc` and `intercept-c++` compiler wrappers. - It does generate execution report into target directory. And execute - the wrapped compilation with the real compiler. 
The parameters for - report and execution are from environment variables. + It does generate execution report into target directory. + The target directory name is from environment variables. """ - Those parameters which for 'libear' library can't have meaningful - values are faked. """ + message_prefix = 'execution report might be incomplete: %s' - # initialize wrapper logging - logging.basicConfig(format='intercept: %(levelname)s: %(message)s', - level=os.getenv('INTERCEPT_BUILD_VERBOSE', 'INFO')) - # write report + target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR') + if not target_dir: + logging.warning(message_prefix, 'missing target directory') + return + # write current execution info to the pid file try: - target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR') - if not target_dir: - raise UserWarning('exec report target directory not found') - pid = str(os.getpid()) - target_file = os.path.join(target_dir, pid + '.cmd') - logging.debug('writing exec report to: %s', target_file) - with open(target_file, 'ab') as handler: - working_dir = os.getcwd() - command = US.join(sys.argv) + US - content = RS.join([pid, pid, 'wrapper', working_dir, command]) + GS - handler.write(content.encode('utf-8')) + target_file_name = str(uuid.uuid4()) + TRACE_FILE_EXTENSION + target_file = os.path.join(target_dir, target_file_name) + logging.debug('writing execution report to: %s', target_file) + write_exec_trace(target_file, kwargs['execution']) except IOError: - logging.exception('writing exec report failed') - except UserWarning as warning: - logging.warning(warning) - # execute with real compiler - compiler = os.getenv('INTERCEPT_BUILD_CXX', 'c++') if cplusplus \ - else os.getenv('INTERCEPT_BUILD_CC', 'cc') - compilation = [compiler] + sys.argv[1:] - logging.debug('execute compiler: %s', compilation) - return subprocess.call(compilation) + logging.warning(message_prefix, 'io problem') + + +def write_exec_trace(filename, entry): + """ Write execution report file. 
+ + This method shall be sync with the execution report writer in interception + library. The entry in the file is a JSON objects. + + :param filename: path to the output execution trace file, + :param entry: the Execution object to append to that file. """ + + call = {'pid': entry.pid, 'cwd': entry.cwd, 'cmd': entry.cmd} + with open(filename, 'w') as handler: + json.dump(call, handler) def parse_exec_trace(filename): - """ Parse the file generated by the 'libear' preloaded library. + """ Parse execution report file. Given filename points to a file which contains the basic report - generated by the interception library or wrapper command. A single - report file _might_ contain multiple process creation info. """ + generated by the interception library or compiler wrapper. - logging.debug('parse exec trace file: %s', filename) + :param filename: path to an execution trace file to read from, + :return: an Execution object. """ + + logging.debug(filename) with open(filename, 'r') as handler: - content = handler.read() - for group in filter(bool, content.split(GS)): - records = group.split(RS) - yield { - 'pid': records[0], - 'ppid': records[1], - 'function': records[2], - 'directory': records[3], - 'command': records[4].split(US)[:-1] - } - - -def format_entry(exec_trace): - """ Generate the desired fields for compilation database entries. """ - - def abspath(cwd, name): - """ Create normalized absolute path from input filename. 
""" - fullname = name if os.path.isabs(name) else os.path.join(cwd, name) - return os.path.normpath(fullname) - - logging.debug('format this command: %s', exec_trace['command']) - compilation = split_command(exec_trace['command']) - if compilation: - for source in compilation.files: - compiler = 'c++' if compilation.compiler == 'c++' else 'cc' - command = [compiler, '-c'] + compilation.flags + [source] - logging.debug('formated as: %s', command) - yield { - 'directory': exec_trace['directory'], - 'command': encode(command), - 'file': abspath(exec_trace['directory'], source) - } + entry = json.load(handler) + return Execution( + pid=entry['pid'], + cwd=entry['cwd'], + cmd=entry['cmd']) + + +def exec_trace_files(directory): + """ Generates exec trace file names. + + :param directory: path to directory which contains the trace files. + :return: a generator of file names (absolute path). """ + + for root, _, files in os.walk(directory): + for candidate in files: + __, extension = os.path.splitext(candidate) + if extension == TRACE_FILE_EXTENSION: + yield os.path.join(root, candidate) def is_preload_disabled(platform): @@ -238,102 +206,17 @@ the path and, if so, (2) whether the output of executing 'csrutil status' contains 'System Integrity Protection status: enabled'. - Same problem on linux when SELinux is enabled. The status query program - 'sestatus' and the output when it's enabled 'SELinux status: enabled'. """ + :param platform: name of the platform (returned by sys.platform), + :return: True if library preload will fail by the dynamic linker. 
""" - if platform == 'darwin': - pattern = re.compile(r'System Integrity Protection status:\s+enabled') + if platform in WRAPPER_ONLY_PLATFORMS: + return True + elif platform == 'darwin': command = ['csrutil', 'status'] - elif platform in {'linux', 'linux2'}: - pattern = re.compile(r'SELinux status:\s+enabled') - command = ['sestatus'] + pattern = re.compile(r'System Integrity Protection status:\s+enabled') + try: + return any(pattern.match(line) for line in run_command(command)) + except: + return False else: return False - - try: - lines = subprocess.check_output(command).decode('utf-8') - return any((pattern.match(line) for line in lines.splitlines())) - except: - return False - - -def entry_hash(entry): - """ Implement unique hash method for compilation database entries. """ - - # For faster lookup in set filename is reverted - filename = entry['file'][::-1] - # For faster lookup in set directory is reverted - directory = entry['directory'][::-1] - # On OS X the 'cc' and 'c++' compilers are wrappers for - # 'clang' therefore both call would be logged. To avoid - # this the hash does not contain the first word of the - # command. - command = ' '.join(decode(entry['command'])[1:]) - - return '<>'.join([filename, directory, command]) - - -def create_parser(): - """ Command line argument parser factory method. """ - - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - parser.add_argument( - '--verbose', '-v', - action='count', - default=0, - help="""Enable verbose output from '%(prog)s'. 
A second and third - flag increases verbosity.""") - parser.add_argument( - '--cdb', - metavar='<file>', - default="compile_commands.json", - help="""The JSON compilation database.""") - group = parser.add_mutually_exclusive_group() - group.add_argument( - '--append', - action='store_true', - help="""Append new entries to existing compilation database.""") - group.add_argument( - '--disable-filter', '-n', - dest='raw_entries', - action='store_true', - help="""Intercepted child process creation calls (exec calls) are all - logged to the output. The output is not a compilation database. - This flag is for debug purposes.""") - - advanced = parser.add_argument_group('advanced options') - advanced.add_argument( - '--override-compiler', - action='store_true', - help="""Always resort to the compiler wrapper even when better - intercept methods are available.""") - advanced.add_argument( - '--use-cc', - metavar='<path>', - dest='cc', - default='cc', - help="""When '%(prog)s' analyzes a project by interposing a compiler - wrapper, which executes a real compiler for compilation and - do other tasks (record the compiler invocation). Because of - this interposing, '%(prog)s' does not know what compiler your - project normally uses. Instead, it simply overrides the CC - environment variable, and guesses your default compiler. 
- - If you need '%(prog)s' to use a specific compiler for - *compilation* then you can use this option to specify a path - to that compiler.""") - advanced.add_argument( - '--use-c++', - metavar='<path>', - dest='cxx', - default='c++', - help="""This is the same as "--use-cc" but for C++ code.""") - - parser.add_argument( - dest='build', - nargs=argparse.REMAINDER, - help="""Command to run.""") - - return parser Index: tools/scan-build-py/libscanbuild/report.py =================================================================== --- tools/scan-build-py/libscanbuild/report.py +++ tools/scan-build-py/libscanbuild/report.py @@ -13,102 +13,64 @@ import os.path import sys import shutil -import time -import tempfile import itertools import plistlib import glob import json import logging -import contextlib import datetime -from libscanbuild import duplicate_check from libscanbuild.clang import get_version -__all__ = ['report_directory', 'document'] +__all__ = ['document'] -@contextlib.contextmanager -def report_directory(hint, keep): - """ Responsible for the report directory. - - hint -- could specify the parent directory of the output directory. - keep -- a boolean value to keep or delete the empty report directory. """ - - stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' - stamp = datetime.datetime.now().strftime(stamp_format) - - parentdir = os.path.abspath(hint) - if not os.path.exists(parentdir): - os.makedirs(parentdir) - - name = tempfile.mkdtemp(prefix=stamp, dir=parentdir) - - logging.info('Report directory created: %s', name) - - try: - yield name - finally: - if os.listdir(name): - msg = "Run 'scan-view %s' to examine bug reports." - keep = True - else: - if keep: - msg = "Report directory '%s' contans no report, but kept." - else: - msg = "Removing directory '%s' because it contains no report." 
- logging.warning(msg, name) - - if not keep: - os.rmdir(name) - - -def document(args, output_dir, use_cdb): +def document(args): """ Generates cover report and returns the number of bugs/crashes. """ html_reports_available = args.output_format in {'html', 'plist-html'} logging.debug('count crashes and bugs') - crash_count = sum(1 for _ in read_crashes(output_dir)) + crash_count = sum(1 for _ in read_crashes(args.output)) bug_counter = create_counters() - for bug in read_bugs(output_dir, html_reports_available): + for bug in read_bugs(args.output, html_reports_available): bug_counter(bug) result = crash_count + bug_counter.total if html_reports_available and result: + use_cdb = os.path.exists(args.cdb) + logging.debug('generate index.html file') - # common prefix for source files to have sort filenames + # common prefix for source files to have shorter paths + prefix = commonprefix_from(args.cdb) if use_cdb else os.getcwd() # assemble the cover from multiple fragments + fragments = [] try: - fragments = [] if bug_counter.total: - fragments.append(bug_summary(output_dir, bug_counter)) - fragments.append(bug_report(output_dir, prefix)) + fragments.append(bug_summary(args.output, bug_counter)) + fragments.append(bug_report(args.output, prefix)) if crash_count: - fragments.append(crash_report(output_dir, prefix)) - assemble_cover(output_dir, prefix, args, fragments) - # copy additinal files to the report - copy_resource_files(output_dir) + fragments.append(crash_report(args.output, prefix)) + assemble_cover(args, prefix, fragments) + # copy additional files to the report + copy_resource_files(args.output) if use_cdb: - shutil.copy(args.cdb, output_dir) + shutil.copy(args.cdb, args.output) finally: for fragment in fragments: os.remove(fragment) return result -def assemble_cover(output_dir, prefix, args, fragments): +def assemble_cover(args, prefix, fragments): """ Put together the fragments into a final report.
""" import getpass import socket - import datetime if args.html_title is None: args.html_title = os.path.basename(prefix) + ' - analyzer results' - with open(os.path.join(output_dir, 'index.html'), 'w') as handle: + with open(os.path.join(args.output, 'index.html'), 'w') as handle: indent = 0 handle.write(reindent(""" |<!DOCTYPE html> @@ -375,11 +337,12 @@ match = re.match(r'(.*)\.info\.txt', filename) name = match.group(1) if match else None - with open(filename) as handler: - lines = handler.readlines() + with open(filename, mode='rb') as handler: + # workaround: read as bytes and strip manually so Windows '\r\n' line endings are handled correctly + lines = [line.decode().rstrip() for line in handler.readlines()] return { - 'source': lines[0].rstrip(), - 'problem': lines[1].rstrip(), + 'source': lines[0], + 'problem': lines[1], 'file': name, 'info': name + '.info.txt', 'stderr': name + '.stderr.txt' } @@ -399,6 +362,35 @@ return escape('bt_' + smash('bug_category') + '_' + smash('bug_type')) +def duplicate_check(hash_function): + """ Workaround to detect duplicate dictionary values. + + Python `dict` type has no `hash` method, which is required by the `set` + type to store elements. + + This solution still does not store the `dict` as a value in a `set`. Instead + it calculates a `string` hash and stores that. Therefore it can only say + whether that hash is already taken or not. + + This method is a factory method, which returns a predicate. """ + + def predicate(entry): + """ The predicate which calculates and stores the hash of the given + entries. The entry type has to work with the given hash function. + + :param entry: the questioned entry, + :return: true/false depending on whether the hash value has already been seen. + """ + entry_hash = hash_function(entry) + if entry_hash not in state: + state.add(entry_hash) + return False + return True + + state = set() + return predicate + + + def create_counters(): """ Create counters for bug statistics.
@@ -519,9 +511,11 @@ def commonprefix(files): - """ Fixed version of os.path.commonprefix. Return the longest path prefix - that is a prefix of all paths in filenames. """ + """ Fixed version of os.path.commonprefix. + :param files: list of file names + :return: the longest path prefix that is a prefix of all paths in files + """ result = None for current in files: if result is not None: Index: tools/scan-build-py/libscanbuild/runner.py =================================================================== --- tools/scan-build-py/libscanbuild/runner.py +++ /dev/null @@ -1,302 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -""" This module is responsible to run the analyzer commands. """ - -import re -import os -import os.path -import tempfile -import functools -import subprocess -import logging -from libscanbuild.compilation import classify_source, compiler_language -from libscanbuild.clang import get_version, get_arguments -from libscanbuild.shell import decode - -__all__ = ['run'] - -# To have good results from static analyzer certain compiler options shall be -# omitted. The compiler flag filtering only affects the static analyzer run. -# -# Keys are the option name, value number of options to skip -IGNORED_FLAGS = { - '-c': 0, # compile option will be overwritten - '-fsyntax-only': 0, # static analyzer option will be overwritten - '-o': 1, # will set up own output file - # flags below are inherited from the perl implementation. - '-g': 0, - '-save-temps': 0, - '-install_name': 1, - '-exported_symbols_list': 1, - '-current_version': 1, - '-compatibility_version': 1, - '-init': 1, - '-e': 1, - '-seg1addr': 1, - '-bundle_loader': 1, - '-multiply_defined': 1, - '-sectorder': 3, - '--param': 1, - '--serialize-diagnostics': 1 -} - - -def require(required): - """ Decorator for checking the required values in state. 
- - It checks the required attributes in the passed state and stop when - any of those is missing. """ - - def decorator(function): - @functools.wraps(function) - def wrapper(*args, **kwargs): - for key in required: - if key not in args[0]: - raise KeyError('{0} not passed to {1}'.format( - key, function.__name__)) - - return function(*args, **kwargs) - - return wrapper - - return decorator - - -@require(['command', # entry from compilation database - 'directory', # entry from compilation database - 'file', # entry from compilation database - 'clang', # clang executable name (and path) - 'direct_args', # arguments from command line - 'force_debug', # kill non debug macros - 'output_dir', # where generated report files shall go - 'output_format', # it's 'plist' or 'html' or both - 'output_failures']) # generate crash reports or not -def run(opts): - """ Entry point to run (or not) static analyzer against a single entry - of the compilation database. - - This complex task is decomposed into smaller methods which are calling - each other in chain. If the analyzis is not possibe the given method - just return and break the chain. - - The passed parameter is a python dictionary. Each method first check - that the needed parameters received. (This is done by the 'require' - decorator. It's like an 'assert' to check the contract between the - caller and the called method.) """ - - try: - command = opts.pop('command') - command = command if isinstance(command, list) else decode(command) - logging.debug("Run analyzer against '%s'", command) - opts.update(classify_parameters(command)) - - return arch_check(opts) - except Exception: - logging.error("Problem occured during analyzis.", exc_info=1) - return None - - -@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language', - 'error_type', 'error_output', 'exit_code']) -def report_failure(opts): - """ Create report when analyzer failed. - - The major report is the preprocessor output. 
The output filename generated - randomly. The compiler output also captured into '.stderr.txt' file. - And some more execution context also saved into '.info.txt' file. """ - - def extension(opts): - """ Generate preprocessor file extension. """ - - mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} - return mapping.get(opts['language'], '.i') - - def destination(opts): - """ Creates failures directory if not exits yet. """ - - name = os.path.join(opts['output_dir'], 'failures') - if not os.path.isdir(name): - os.makedirs(name) - return name - - error = opts['error_type'] - (handle, name) = tempfile.mkstemp(suffix=extension(opts), - prefix='clang_' + error + '_', - dir=destination(opts)) - os.close(handle) - cwd = opts['directory'] - cmd = get_arguments([opts['clang'], '-fsyntax-only', '-E'] + - opts['flags'] + [opts['file'], '-o', name], cwd) - logging.debug('exec command in %s: %s', cwd, ' '.join(cmd)) - subprocess.call(cmd, cwd=cwd) - # write general information about the crash - with open(name + '.info.txt', 'w') as handle: - handle.write(opts['file'] + os.linesep) - handle.write(error.title().replace('_', ' ') + os.linesep) - handle.write(' '.join(cmd) + os.linesep) - handle.write(' '.join(os.uname()) + os.linesep) - handle.write(get_version(opts['clang'])) - handle.close() - # write the captured output too - with open(name + '.stderr.txt', 'w') as handle: - handle.writelines(opts['error_output']) - handle.close() - # return with the previous step exit code and output - return { - 'error_output': opts['error_output'], - 'exit_code': opts['exit_code'] - } - - -@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir', - 'output_format']) -def run_analyzer(opts, continuation=report_failure): - """ It assembles the analysis command line and executes it. Capture the - output of the analysis and returns with it. If failure reports are - requested, it calls the continuation to generate it. 
""" - - def output(): - """ Creates output file name for reports. """ - if opts['output_format'] in {'plist', 'plist-html'}: - (handle, name) = tempfile.mkstemp(prefix='report-', - suffix='.plist', - dir=opts['output_dir']) - os.close(handle) - return name - return opts['output_dir'] - - cwd = opts['directory'] - cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + - opts['flags'] + [opts['file'], '-o', output()], - cwd) - logging.debug('exec command in %s: %s', cwd, ' '.join(cmd)) - child = subprocess.Popen(cmd, - cwd=cwd, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - output = child.stdout.readlines() - child.stdout.close() - # do report details if it were asked - child.wait() - if opts.get('output_failures', False) and child.returncode: - error_type = 'crash' if child.returncode & 127 else 'other_error' - opts.update({ - 'error_type': error_type, - 'error_output': output, - 'exit_code': child.returncode - }) - return continuation(opts) - # return the output for logging and exit code for testing - return {'error_output': output, 'exit_code': child.returncode} - - -@require(['flags', 'force_debug']) -def filter_debug_flags(opts, continuation=run_analyzer): - """ Filter out nondebug macros when requested. """ - - if opts.pop('force_debug'): - # lazy implementation just append an undefine macro at the end - opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) - - return continuation(opts) - - -@require(['language', 'compiler', 'file', 'flags']) -def language_check(opts, continuation=filter_debug_flags): - """ Find out the language from command line parameters or file name - extension. The decision also influenced by the compiler invocation. """ - - accepted = frozenset({ - 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', - 'c++-cpp-output', 'objective-c-cpp-output' - }) - - # language can be given as a parameter... - language = opts.pop('language') - compiler = opts.pop('compiler') - # ... 
or find out from source file extension - if language is None and compiler is not None: - language = classify_source(opts['file'], compiler == 'c') - - if language is None: - logging.debug('skip analysis, language not known') - return None - elif language not in accepted: - logging.debug('skip analysis, language not supported') - return None - else: - logging.debug('analysis, language: %s', language) - opts.update({'language': language, - 'flags': ['-x', language] + opts['flags']}) - return continuation(opts) - - -@require(['arch_list', 'flags']) -def arch_check(opts, continuation=language_check): - """ Do run analyzer through one of the given architectures. """ - - disabled = frozenset({'ppc', 'ppc64'}) - - received_list = opts.pop('arch_list') - if received_list: - # filter out disabled architectures and -arch switches - filtered_list = [a for a in received_list if a not in disabled] - if filtered_list: - # There should be only one arch given (or the same multiple - # times). If there are multiple arch are given and are not - # the same, those should not change the pre-processing step. - # But that's the only pass we have before run the analyzer. - current = filtered_list.pop() - logging.debug('analysis, on arch: %s', current) - - opts.update({'flags': ['-arch', current] + opts['flags']}) - return continuation(opts) - else: - logging.debug('skip analysis, found not supported arch') - return None - else: - logging.debug('analysis, on default arch') - return continuation(opts) - - -def classify_parameters(command): - """ Prepare compiler flags (filters some and add others) and take out - language (-x) and architecture (-arch) flags for future processing. 
""" - - result = { - 'flags': [], # the filtered compiler flags - 'arch_list': [], # list of architecture flags - 'language': None, # compilation language, None, if not specified - 'compiler': compiler_language(command) # 'c' or 'c++' - } - - # iterate on the compile options - args = iter(command[1:]) - for arg in args: - # take arch flags into a separate basket - if arg == '-arch': - result['arch_list'].append(next(args)) - # take language - elif arg == '-x': - result['language'] = next(args) - # parameters which looks source file are not flags - elif re.match(r'^[^-].+', arg) and classify_source(arg): - pass - # ignore some flags - elif arg in IGNORED_FLAGS: - count = IGNORED_FLAGS[arg] - for _ in range(count): - next(args) - # we don't care about extra warnings, but we should suppress ones - # that we don't want to see. - elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): - pass - # and consider everything else as compilation flag. - else: - result['flags'].append(arg) - - return result Index: tools/scan-build-py/libscanbuild/shell.py =================================================================== --- tools/scan-build-py/libscanbuild/shell.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -""" This module implements basic shell escaping/unescaping methods. """ - -import re -import shlex - -__all__ = ['encode', 'decode'] - - -def encode(command): - """ Takes a command as list and returns a string. """ - - def needs_quote(word): - """ Returns true if arguments needs to be protected by quotes. - - Previous implementation was shlex.split method, but that's not good - for this job. Currently is running through the string with a basic - state checking. 
""" - - reserved = {' ', '$', '%', '&', '(', ')', '[', ']', '{', '}', '*', '|', - '<', '>', '@', '?', '!'} - state = 0 - for current in word: - if state == 0 and current in reserved: - return True - elif state == 0 and current == '\\': - state = 1 - elif state == 1 and current in reserved | {'\\'}: - state = 0 - elif state == 0 and current == '"': - state = 2 - elif state == 2 and current == '"': - state = 0 - elif state == 0 and current == "'": - state = 3 - elif state == 3 and current == "'": - state = 0 - return state != 0 - - def escape(word): - """ Do protect argument if that's needed. """ - - table = {'\\': '\\\\', '"': '\\"'} - escaped = ''.join([table.get(c, c) for c in word]) - - return '"' + escaped + '"' if needs_quote(word) else escaped - - return " ".join([escape(arg) for arg in command]) - - -def decode(string): - """ Takes a command string and returns as a list. """ - - def unescape(arg): - """ Gets rid of the escaping characters. """ - - if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"': - arg = arg[1:-1] - return re.sub(r'\\(["\\])', r'\1', arg) - return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg) - - return [unescape(arg) for arg in shlex.split(string)] Index: tools/scan-build-py/tests/__init__.py =================================================================== --- tools/scan-build-py/tests/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. 
- -import unittest - -import tests.unit -import tests.functional.cases - - -def suite(): - loader = unittest.TestLoader() - suite = unittest.TestSuite() - suite.addTests(loader.loadTestsFromModule(tests.unit)) - suite.addTests(loader.loadTestsFromModule(tests.functional.cases)) - return suite Index: tools/scan-build-py/tests/functional/Input/compile_error.c =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/Input/compile_error.c @@ -0,0 +1 @@ +int test() { ; \ No newline at end of file Index: tools/scan-build-py/tests/functional/Input/div_zero.c =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/Input/div_zero.c @@ -0,0 +1,11 @@ +int bad_guy(int * i) +{ + *i = 9; + return *i; +} + +void bad_guy_test() +{ + int * ptr = 0; + bad_guy(ptr); +} Index: tools/scan-build-py/tests/functional/Input/main.c =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/Input/main.c @@ -0,0 +1 @@ +int main() { return 0; } Index: tools/scan-build-py/tests/functional/cases/__init__.py =================================================================== --- tools/scan-build-py/tests/functional/cases/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -import re -import os.path -import subprocess - - -def load_tests(loader, suite, pattern): - from . import test_from_cdb - suite.addTests(loader.loadTestsFromModule(test_from_cdb)) - from . import test_from_cmd - suite.addTests(loader.loadTestsFromModule(test_from_cmd)) - from . import test_create_cdb - suite.addTests(loader.loadTestsFromModule(test_create_cdb)) - from . 
import test_exec_anatomy - suite.addTests(loader.loadTestsFromModule(test_exec_anatomy)) - return suite - - -def make_args(target): - this_dir, _ = os.path.split(__file__) - path = os.path.normpath(os.path.join(this_dir, '..', 'src')) - return ['make', 'SRCDIR={}'.format(path), 'OBJDIR={}'.format(target), '-f', - os.path.join(path, 'build', 'Makefile')] - - -def silent_call(cmd, *args, **kwargs): - kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT}) - return subprocess.call(cmd, *args, **kwargs) - - -def silent_check_call(cmd, *args, **kwargs): - kwargs.update({'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT}) - return subprocess.check_call(cmd, *args, **kwargs) - - -def call_and_report(analyzer_cmd, build_cmd): - child = subprocess.Popen(analyzer_cmd + ['-v'] + build_cmd, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - - pattern = re.compile('Report directory created: (.+)') - directory = None - for line in child.stdout.readlines(): - match = pattern.search(line) - if match and match.lastindex == 1: - directory = match.group(1) - break - child.stdout.close() - child.wait() - - return (child.returncode, directory) - - -def check_call_and_report(analyzer_cmd, build_cmd): - exit_code, result = call_and_report(analyzer_cmd, build_cmd) - if exit_code != 0: - raise subprocess.CalledProcessError( - exit_code, analyzer_cmd + build_cmd, None) - else: - return result - - -def create_empty_file(filename): - with open(filename, 'a') as handle: - pass Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_architecture_specified.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_architecture_specified.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/architecture_specified +# RUN: cd %T/architecture_specified; %{analyze-build} -o . 
--cdb input.json | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── input.json +# ├── check.sh +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +cat >> "${root_dir}/input.json" << EOF +[ + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=1" + }, + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=2 -arch i386" + }, + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=3 -arch x86_64" + }, + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=4 -arch ppc" + } +] +EOF + +checker_file="${root_dir}/check.sh" +cat >> ${checker_file} << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +runs=\$(grep "exec command" | sort | uniq) + +assert_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -eq 0 ]; then + echo "\$message" && false; + fi +} + +assert_not_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -gt 0 ]; then + echo "\$message" && false; + fi +} + +assert_present "ver=1" "default architecture was analised" +assert_present "ver=2" "given architecture (i386) was analised" +assert_present "ver=3" "given architecture (x86_64) was analised" +assert_not_present "ver=4" "not supported architecture was not analised" + +assert_present "ver=8" "test assert present" || true +assert_not_present "ver=1" "test assert not present" || true +EOF +chmod +x ${checker_file} Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_debug_code.sh 
=================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_debug_code.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +# XFAIL: * +# RUN: bash %s %T/debug_code +# RUN: cd %T/debug_code; %{scan-build} -o . --status-bugs --force-analyze-debug-code ./run.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── run.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cat >> "${root_dir}/src/broken.c" << EOF +#if NDEBUG +#else +EOF +cat >> "${root_dir}/src/broken.c" < "${test_input_dir}/div_zero.c" +cat >> "${root_dir}/src/broken.c" << EOF +#endif +EOF + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c ./src/broken.c -o ./src/broken.o -DNDEBUG; +true; +EOF +chmod +x ${build_file} Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_disable_checkers.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_disable_checkers.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/disable_checkers +# RUN: cd %T/disable_checkers; %{analyze-build} -o . --status-bugs --disable-checker core.NullDereference --cdb input.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── input.json +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +cat >> "${root_dir}/input.json" << EOF +[ + { + "directory": "${root_dir}", + "file": "${root_dir}/src/broken.c", + "command": "cc -c ./src/broken.c -o ./src/broken.o" + } +] +EOF Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_enable_checkers.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_enable_checkers.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/enable_checkers +# RUN: cd %T/enable_checkers; %{analyze-build} -o . --enable-checker debug.ConfigDumper --cdb input.json | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── input.json +# ├── check.sh +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +cat >> "${root_dir}/input.json" << EOF +[ + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o" + } +] +EOF + +checker_file="${root_dir}/check.sh" +cat >> ${checker_file} << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +runs=\$(grep "exec command" | sort | uniq) + +assert_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -eq 0 ]; then + echo "\$message" && false; + fi +} + +assert_not_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -gt 0 ]; then + echo "\$message" && false; + fi +} + + +assert_present "debug.ConfigDumper" "checker name present" +assert_present "-analyzer-checker" "enable checker flag present" +assert_not_present "-analyzer-disable-checker" "disable checker flag missing" +EOF +chmod 
+x ${checker_file} Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_exclude_files.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_exclude_files.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/exclude_files +# RUN: cd %T/exclude_files; %{scan-build} -o . --exclude src/ignore --intercept-first ./run.sh | ./check.sh +# RUN: cd %T/exclude_files; %{scan-build} -o . --exclude src/ignore --intercept-first --override-compiler ./run.sh | ./check.sh +# RUN: cd %T/exclude_files; %{scan-build} -o . --exclude src/ignore --override-compiler ./run.sh | ./check.sh +# RUN: cd %T/exclude_files; %{scan-build} -o . --exclude %T/exclude_files/src/ignore --intercept-first ./run.sh | ./check.sh +# RUN: cd %T/exclude_files; %{scan-build} -o . --exclude %T/exclude_files/src/ignore --intercept-first --override-compiler ./run.sh | ./check.sh +# RUN: cd %T/exclude_files; %{scan-build} -o . --exclude %T/exclude_files/src/ignore --override-compiler ./run.sh | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── check.sh +# ├── run.sh +# └── src +# ├── empty.c +# └── ignore +# └── empty.c + + +root_dir=$1 +mkdir -p "${root_dir}/src/ignore" + +touch "${root_dir}/src/empty.c" +touch "${root_dir}/src/ignore/empty.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c ./src/empty.c -o ./src/empty.o -Dver=1; +"\$CC" -c "${root_dir}/src/empty.c" -o ./src/empty.o -Dver=2; +"\$CC" -c ./src/ignore/empty.c -o ./src/ignore/empty.o -Dver=3; +"\$CC" -c "${root_dir}/src/ignore/empty.c" -o ./src/ignore/empty.o -Dver=4; +true; +EOF +chmod +x ${build_file} + +checker_file="${root_dir}/check.sh" +cat >> ${checker_file} << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +runs=\$(grep "exec command" | sort | uniq) + +assert_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -eq 0 ]; then + echo "\$message" && false; + fi +} + +assert_not_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -gt 0 ]; then + echo "\$message" && false; + fi +} + +assert_present "ver=1" "analyzer shall run against ver=1" +assert_present "ver=2" "analyzer shall run against ver=2" +assert_not_present "ver=3" "analyzer shall not run against ver=3" +assert_not_present "ver=4" "analyzer shall not run against ver=4" + +assert_present "ver=8" "test assert present" || true +assert_not_present "ver=1" "test assert not present" || true +EOF +chmod +x ${checker_file} Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_ignore_configure.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_ignore_configure.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/ignore_configure +# RUN: cd %T/ignore_configure; %{scan-build} -o . 
--intercept-first ./configure| ./check.sh +# RUN: cd %T/ignore_configure; %{scan-build} -o . --intercept-first --override-compiler ./configure | ./check.sh +# RUN: cd %T/ignore_configure; %{scan-build} -o . --override-compiler ./configure | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── configure +# ├── check.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +build_file="${root_dir}/configure" +cat >> "${build_file}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +\${CC} -c -o src/broken.o src/broken.c +true +EOF +chmod +x "${build_file}" + +checker_file="${root_dir}/check.sh" +cat >> "${checker_file}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ -d "\$out_dir" ] +then + echo "output directory should not exists" + false +fi +EOF +chmod +x "${checker_file}" \ No newline at end of file Index: tools/scan-build-py/tests/functional/cases/analyze/analyze_language_specified.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/analyze_language_specified.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/language_specified +# RUN: cd %T/language_specified; %{analyze-build} -o . --cdb input.json | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── input.json +# ├── check.sh +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +cat >> "${root_dir}/input.json" << EOF +[ + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=1" + }, + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=2 -x c" + }, + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=3 -x c++" + }, + { + "directory": "${root_dir}", + "file": "${root_dir}/src/empty.c", + "command": "cc -c ./src/empty.c -o ./src/empty.o -Dver=4 -x fortran" + } +] +EOF + +checker_file="${root_dir}/check.sh" +cat >> ${checker_file} << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +runs=\$(grep "exec command" | sort | uniq) + +assert_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -eq 0 ]; then + echo "\$message" && false; + fi +} + +assert_not_present() { + local pattern="\$1"; + local message="\$2"; + + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -gt 0 ]; then + echo "\$message" && false; + fi +} + +assert_present "ver=1" "default language was analised" +assert_present "ver=2" "given language (c) was analised" +assert_present "ver=3" "given language (c++) was analised" +assert_not_present "ver=4" "not supported language was not analised" + +assert_present "ver=8" "test assert present" || true +assert_not_present "ver=1" "test assert not present" || true +EOF +chmod +x ${checker_file} Index: tools/scan-build-py/tests/functional/cases/analyze/exit_code_failed.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/exit_code_failed.py @@ -0,0 +1,3 @@ +# XFAIL: * +# RUN: mkdir 
%T/exit_code_for_fail +# RUN: cd %T/exit_code_for_fail; %{scan-build} false Index: tools/scan-build-py/tests/functional/cases/analyze/exit_code_failed_shows_bugs.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/exit_code_failed_shows_bugs.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# XFAIL: * +# RUN: bash %s %T/exit_code_failed_shows_bugs +# RUN: cd %T/exit_code_failed_shows_bugs; %{analyze-build} -o . --status-bugs --cdb input.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── input.json +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +cat >> "${root_dir}/input.json" << EOF +[ + { + "directory": "${root_dir}", + "file": "${root_dir}/src/broken.c", + "command": "cc -c ./src/broken.c -o ./src/broken.o" + } +] +EOF Index: tools/scan-build-py/tests/functional/cases/analyze/exit_code_failed_shows_bugs_on_plist.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/exit_code_failed_shows_bugs_on_plist.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# XFAIL: * +# RUN: bash %s %T/exit_code_failed_shows_bugs +# RUN: cd %T/exit_code_failed_shows_bugs; %{scan-build} -o . --status-bugs --plist --cdb input.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +cat >> "${root_dir}/run.sh" << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c ./src/broken.c -o ./src/broken.o -DNDEBUG; +true; +EOF Index: tools/scan-build-py/tests/functional/cases/analyze/exit_code_success.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/exit_code_success.py @@ -0,0 +1,6 @@ +# RUN: mkdir %T/exit_code_for_success +# RUN: cd %T/exit_code_for_success; %{scan-build} true +# RUN: cd %T/exit_code_for_success; %{scan-build} --status-bugs true +# RUN: cd %T/exit_code_for_success; %{scan-build} --status-bugs false +# RUN: cd %T/exit_code_for_success; %{scan-build} --status-bugs --plist true +# RUN: cd %T/exit_code_for_success; %{scan-build} --status-bugs --plist false Index: tools/scan-build-py/tests/functional/cases/analyze/quoted_arguments.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/quoted_arguments.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/quoted_arguments +# RUN: cd %T/quoted_arguments; %{scan-build} -o . --status-bugs --intercept-first ./run.sh +# RUN: cd %T/quoted_arguments; %{scan-build} -o . --status-bugs --intercept-first --override-compiler ./run.sh +# RUN: cd %T/quoted_arguments; %{scan-build} -o . --status-bugs --override-compiler ./run.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# └── src +# └── names.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cat >> "${root_dir}/src/names.c" << EOF +char const * const first = FIRST; +char const * const last = LAST; + +#include <stdio.h> + +int main() { + printf("hi %s %s, how are you?\n", first, last); + return 0; +} +EOF + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" ./src/names.c -o names -DFIRST=\"Sir\ John\" -DLAST="\"Smith Dr\""; +./names | grep "hi Sir John Smith Dr, how are you?" +EOF +chmod +x ${build_file} Index: tools/scan-build-py/tests/functional/cases/analyze/scan_runs_analyzer.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/scan_runs_analyzer.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/runs_analyzer +# RUN: cd %T/runs_analyzer; %{scan-build} -o . --intercept-first ./run.sh | ./check.sh +# RUN: cd %T/runs_analyzer; %{scan-build} -o . --intercept-first --override-compiler ./run.sh | ./check.sh +# RUN: cd %T/runs_analyzer; %{scan-build} -o . --override-compiler ./run.sh | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── check.sh +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c ./src/empty.c -o ./src/empty.o -Dver=1; +"\$CC" -c "${root_dir}/src/empty.c" -o ./src/empty.o -Dver=2; +true; +EOF +chmod +x ${build_file} + +checker_file="${root_dir}/check.sh" +cat >> ${checker_file} << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +runs=\$(grep "exec command" | sort | uniq) + +assert_present() { + local pattern="\$1"; + if [ \$(echo "\$runs" | grep -- "\$pattern" | wc -l) -eq 0 ]; then + false; + fi +} + +assert_present "ver=1" +assert_present "ver=2" +EOF +chmod +x ${checker_file} Index: tools/scan-build-py/tests/functional/cases/analyze/wrapper/compiler_name_from_argument.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/wrapper/compiler_name_from_argument.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T +# RUN: cd %T; %{scan-wrapped-build} -o . --intercept-first ./run.sh | ./check.sh +# RUN: cd %T; %{scan-wrapped-build} -o . --intercept-first --override-compiler ./run.sh | ./check.sh +# RUN: cd %T; %{scan-wrapped-build} -o . --override-compiler ./run.sh | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── wrapper +# ├── wrapper++ +# ├── run.sh +# ├── check.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +wrapper_file="${root_dir}/wrapper" +cat >> ${wrapper_file} << EOF +#!/usr/bin/env bash + +set -o xtrace + +${REAL_CC} \$@ +EOF +chmod +x ${wrapper_file} + +wrapperxx_file="${root_dir}/wrapper++" +cat >> ${wrapperxx_file} << EOF +#!/usr/bin/env bash + +set -o xtrace + +${REAL_CXX} \$@ +EOF +chmod +x ${wrapperxx_file} + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c -o ./src/empty.o ./src/broken.c; +"\$CXX" -c -o ./src/empty.o ./src/broken.c; + +cd src +"\$CC" -c -o ./empty.o ./broken.c; +"\$CXX" -c -o ./empty.o ./broken.c; +EOF +chmod +x ${build_file} + +check_two="${root_dir}/check.sh" +cat >> "${check_two}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ -d "\$out_dir" ] +then + ls "\$out_dir/index.html" + ls \$out_dir/report-*.html +else + echo "output directory should exists" + false +fi +EOF +chmod +x "${check_two}" Index: tools/scan-build-py/tests/functional/cases/analyze/wrapper/lit.local.cfg =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/analyze/wrapper/lit.local.cfg @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+ +import os.path +import lit.util + +this_dir = os.path.dirname(__file__) + +wrapper = os.path.join(this_dir, 'Output', 'wrapper') +wrapperpp = os.path.join(this_dir, 'Output', 'wrapper++') + +config.substitutions.append( + ('%{scan-wrapped-build}', + 'scan-build --use-cc={0} --use-c++={1} -vvvv'.format(wrapper,wrapperpp))) + +# use compiler wrapper +config.environment['REAL_CC'] = config.environment['CC'] +config.environment['REAL_CXX'] = config.environment['CXX'] +config.environment['CC'] = wrapper +config.environment['CXX'] = wrapperpp Index: tools/scan-build-py/tests/functional/cases/intercept/broken_build.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/broken_build.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/broken_build +# RUN: cd %T/broken_build; %{intercept-build} --cdb wrapper.json --override-compiler ./run.sh +# RUN: cd %T/broken_build; cdb_diff wrapper.json expected.json +# +# when library preload disabled, it falls back to use compiler wrapper +# +# RUN: cd %T/broken_build; %{intercept-build} --cdb preload.json ./run.sh +# RUN: cd %T/broken_build; cdb_diff preload.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/compile_error.c" "${root_dir}/src/broken.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c -o src/broken.o -Dver=1 src/broken.c; +"\$CXX" -c -o src/broken.o -Dver=2 src/broken.c; + +cd src +"\$CC" -c -o broken.o -Dver=3 broken.c; +"\$CXX" -c -o broken.o -Dver=4 broken.c; + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -o src/broken.o -Dver=1 src/broken.c", + "directory": "${root_dir}", + "file": "src/broken.c" +} +, +{ + "command": "c++ -c -o src/broken.o -Dver=2 src/broken.c", + "directory": "${root_dir}", + "file": "src/broken.c" +} +, +{ + "command": "cc -c -o broken.o -Dver=3 broken.c", + "directory": "${root_dir}/src", + "file": "broken.c" +} +, +{ + "command": "c++ -c -o broken.o -Dver=4 broken.c", + "directory": "${root_dir}/src", + "file": "broken.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/clean_env_build_intercept.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/clean_env_build_intercept.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# REQUIRES: preload +# RUN: bash %s %T/clean_env_build +# RUN: cd %T/clean_env_build; %{intercept-build} --cdb result.json env - ./run.sh +# RUN: cd %T/clean_env_build; cdb_diff result.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# └── empty.c + +clang=$(command -v ${CC}) +clangpp=$(command -v ${CXX}) + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +${clang} -c -o src/empty.o -Dver=1 src/empty.c; +${clangpp} -c -o src/empty.o -Dver=2 src/empty.c; + +cd src +${clang} -c -o empty.o -Dver=3 empty.c; +${clangpp} -c -o empty.o -Dver=4 empty.c; + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "cc -c -o empty.o -Dver=3 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +, +{ + "command": "c++ -c -o empty.o -Dver=4 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/deal_with_existing_database.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/deal_with_existing_database.sh @@ -0,0 +1,118 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/extend_build +# RUN: cd %T/extend_build; %{intercept-build} --cdb result.json ./run-one.sh +# RUN: cd %T/extend_build; cdb_diff result.json one.json +# RUN: cd %T/extend_build; %{intercept-build} --cdb result.json ./run-two.sh +# RUN: cd %T/extend_build; cdb_diff result.json two.json +# RUN: cd %T/extend_build; %{intercept-build} --cdb result.json --append ./run-one.sh +# RUN: cd %T/extend_build; cdb_diff result.json sum.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run-one.sh +# ├── run-two.sh +# ├── one.json +# ├── two.json +# ├── sum.json +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +build_file="${root_dir}/run-one.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c -o src/empty.o -Dver=1 src/empty.c; +"\$CXX" -c -o src/empty.o -Dver=2 src/empty.c; + +true; +EOF +chmod +x ${build_file} + +build_file="${root_dir}/run-two.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +cd src +"\$CC" -c -o empty.o -Dver=3 empty.c; +"\$CXX" -c -o empty.o -Dver=4 empty.c; + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/one.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +] +EOF + +cat >> "${root_dir}/two.json" << EOF +[ +{ + "command": "cc -c -o empty.o -Dver=3 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +, +{ + "command": "c++ -c -o empty.o -Dver=4 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +] +EOF + +cat >> "${root_dir}/sum.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "cc -c -o empty.o -Dver=3 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +, +{ + "command": "c++ -c -o empty.o -Dver=4 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/exec/CMakeLists.txt =================================================================== --- /dev/null +++ 
tools/scan-build-py/tests/functional/cases/intercept/exec/CMakeLists.txt @@ -0,0 +1,32 @@ +project(exec C) + +cmake_minimum_required(VERSION 2.8) + +include(CheckCCompilerFlag) +check_c_compiler_flag("-std=c99" C99_SUPPORTED) +if (C99_SUPPORTED) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99") +endif() + +include(CheckFunctionExists) +include(CheckSymbolExists) + +add_definitions(-D_GNU_SOURCE) +list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) + +check_function_exists(execve HAVE_EXECVE) +check_function_exists(execv HAVE_EXECV) +check_function_exists(execvpe HAVE_EXECVPE) +check_function_exists(execvp HAVE_EXECVP) +check_function_exists(execvP HAVE_EXECVP2) +check_function_exists(exect HAVE_EXECT) +check_function_exists(execl HAVE_EXECL) +check_function_exists(execlp HAVE_EXECLP) +check_function_exists(execle HAVE_EXECLE) +check_function_exists(posix_spawn HAVE_POSIX_SPAWN) +check_function_exists(posix_spawnp HAVE_POSIX_SPAWNP) + +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +add_executable(exec main.c) Index: tools/scan-build-py/tests/functional/cases/intercept/exec/config.h.in =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exec/config.h.in @@ -0,0 +1,20 @@ +/* -*- coding: utf-8 -*- +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+*/ + +#pragma once + +#cmakedefine HAVE_EXECVE +#cmakedefine HAVE_EXECV +#cmakedefine HAVE_EXECVPE +#cmakedefine HAVE_EXECVP +#cmakedefine HAVE_EXECVP2 +#cmakedefine HAVE_EXECT +#cmakedefine HAVE_EXECL +#cmakedefine HAVE_EXECLP +#cmakedefine HAVE_EXECLE +#cmakedefine HAVE_POSIX_SPAWN +#cmakedefine HAVE_POSIX_SPAWNP Index: tools/scan-build-py/tests/functional/cases/intercept/exec/main.c =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exec/main.c @@ -0,0 +1,337 @@ +/* -*- coding: utf-8 -*- +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +*/ + +#include "config.h" + +#include <sys/wait.h> +#include <ctype.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <paths.h> + +#if defined HAVE_POSIX_SPAWN || defined HAVE_POSIX_SPAWNP +#include <spawn.h> +#endif + +// ..:: environment access fixer - begin ::.. +#ifdef HAVE_NSGETENVIRON +#include <crt_externs.h> +#else +extern char **environ; +#endif + +char **get_environ() { +#ifdef HAVE_NSGETENVIRON + return *_NSGetEnviron(); +#else + return environ; +#endif +} +// ..:: environment access fixer - end ::.. + +// ..:: test fixtures - begin ::.. 
+static char const *cwd = NULL; +static FILE *fd = NULL; +static int need_comma = 0; + +void expected_out_open(const char *expected) { + cwd = getcwd(NULL, 0); + fd = fopen(expected, "w"); + if (!fd) { + perror("fopen"); + exit(EXIT_FAILURE); + } + fprintf(fd, "[\n"); + need_comma = 0; +} + +void expected_out_close() { + fprintf(fd, "]\n"); + fclose(fd); + fd = NULL; + + free((void *)cwd); + cwd = NULL; +} + +void expected_out(const char *file) { + if (need_comma) + fprintf(fd, ",\n"); + else + need_comma = 1; + + fprintf(fd, "{\n"); + fprintf(fd, " \"directory\": \"%s\",\n", cwd); + fprintf(fd, " \"command\": \"cc -c %s\",\n", file); + fprintf(fd, " \"file\": \"%s\"\n", file); + fprintf(fd, "}\n"); +} + +void create_source(char *file) { + FILE *fd = fopen(file, "w"); + if (!fd) { + perror("fopen"); + exit(EXIT_FAILURE); + } + fprintf(fd, "typedef int score;\n"); + fclose(fd); +} + +typedef void (*exec_fun)(); + +void wait_for(pid_t child) { + int status; + if (-1 == waitpid(child, &status, 0)) { + perror("wait"); + exit(EXIT_FAILURE); + } + if (WIFEXITED(status) ? WEXITSTATUS(status) : EXIT_FAILURE) { + fprintf(stderr, "children process has non zero exit code\n"); + exit(EXIT_FAILURE); + } +} + +#define FORK(FUNC) \ + { \ + pid_t child = fork(); \ + if (-1 == child) { \ + perror("fork"); \ + exit(EXIT_FAILURE); \ + } else if (0 == child) { \ + FUNC fprintf(stderr, "children process failed to exec\n"); \ + exit(EXIT_FAILURE); \ + } else { \ + wait_for(child); \ + } \ + } +// ..:: test fixtures - end ::.. 
+ +#ifdef HAVE_EXECV +void call_execv() { + char *const file = "execv.c"; + char *const compiler = "/usr/bin/cc"; + char *const argv[] = {"cc", "-c", file, 0}; + + expected_out(file); + create_source(file); + + FORK(execv(compiler, argv);) +} +#endif + +#ifdef HAVE_EXECVE +void call_execve() { + char *const file = "execve.c"; + char *const compiler = "/usr/bin/cc"; + char *const argv[] = {compiler, "-c", file, 0}; + char *const envp[] = {"THIS=THAT", 0}; + + expected_out(file); + create_source(file); + + FORK(execve(compiler, argv, envp);) +} +#endif + +#ifdef HAVE_EXECVP +void call_execvp() { + char *const file = "execvp.c"; + char *const compiler = "cc"; + char *const argv[] = {compiler, "-c", file, 0}; + + expected_out(file); + create_source(file); + + FORK(execvp(compiler, argv);) +} +#endif + +#ifdef HAVE_EXECVP2 +void call_execvP() { + char *const file = "execv_p.c"; + char *const compiler = "cc"; + char *const argv[] = {compiler, "-c", file, 0}; + + expected_out(file); + create_source(file); + + FORK(execvP(compiler, _PATH_DEFPATH, argv);) +} +#endif + +#ifdef HAVE_EXECVPE +void call_execvpe() { + char *const file = "execvpe.c"; + char *const compiler = "cc"; + char *const argv[] = {"/usr/bin/cc", "-c", file, 0}; + char *const envp[] = {"THIS=THAT", 0}; + + expected_out(file); + create_source(file); + + FORK(execvpe(compiler, argv, envp);) +} +#endif + +#ifdef HAVE_EXECT +void call_exect() { + char *const file = "exect.c"; + char *const compiler = "/usr/bin/cc"; + char *const argv[] = {compiler, "-c", file, 0}; + char *const envp[] = {"THIS=THAT", 0}; + + expected_out(file); + create_source(file); + + FORK(exect(compiler, argv, envp);) +} +#endif + +#ifdef HAVE_EXECL +void call_execl() { + char *const file = "execl.c"; + char *const compiler = "/usr/bin/cc"; + + expected_out(file); + create_source(file); + + FORK(execl(compiler, "cc", "-c", file, (char *)0);) +} +#endif + +#ifdef HAVE_EXECLP +void call_execlp() { + char *const file = "execlp.c"; + char 
*const compiler = "cc"; + + expected_out(file); + create_source(file); + + FORK(execlp(compiler, compiler, "-c", file, (char *)0);) +} +#endif + +#ifdef HAVE_EXECLE +void call_execle() { + char *const file = "execle.c"; + char *const compiler = "/usr/bin/cc"; + char *const envp[] = {"THIS=THAT", 0}; + + expected_out(file); + create_source(file); + + FORK(execle(compiler, compiler, "-c", file, (char *)0, envp);) +} +#endif + +#ifdef HAVE_POSIX_SPAWN +void call_posix_spawn() { + char *const file = "posix_spawn.c"; + char *const compiler = "cc"; + char *const argv[] = {compiler, "-c", file, 0}; + + expected_out(file); + create_source(file); + + pid_t child; + if (0 != posix_spawn(&child, "/usr/bin/cc", 0, 0, argv, get_environ())) { + perror("posix_spawn"); + exit(EXIT_FAILURE); + } + wait_for(child); +} +#endif + +#ifdef HAVE_POSIX_SPAWNP +void call_posix_spawnp() { + char *const file = "posix_spawnp.c"; + char *const compiler = "cc"; + char *const argv[] = {compiler, "-c", file, 0}; + + expected_out(file); + create_source(file); + + pid_t child; + if (0 != posix_spawnp(&child, "cc", 0, 0, argv, get_environ())) { + perror("posix_spawnp"); + exit(EXIT_FAILURE); + } + wait_for(child); +} +#endif + +int main(int argc, char *const argv[]) { + + char *workdir = NULL; + char *output = NULL; + int c = 0; + + opterr = 0; + while ((c = getopt (argc, argv, "C:o:")) != -1) { + switch (c) { + case 'C': + workdir = optarg; + break; + case 'o': + output = optarg; + break; + case '?': + if (optopt == 'C' || optopt == 'o') + fprintf (stderr, "Option -%c requires an argument.\n", optopt); + else if (isprint (optopt)) + fprintf (stderr, "Unknown option `-%c'.\n", optopt); + else + fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt); + return 1; + default: + abort(); + } + } + for (int index = optind; index < argc; ++index) + printf ("Non-option argument %s\n", argv[index]); + + if (workdir != NULL) { + chdir(workdir); + } + expected_out_open(output); +#ifdef HAVE_EXECV + 
call_execv(); +#endif +#ifdef HAVE_EXECVE + call_execve(); +#endif +#ifdef HAVE_EXECVP + call_execvp(); +#endif +#ifdef HAVE_EXECVP2 + call_execvP(); +#endif +#ifdef HAVE_EXECVPE + call_execvpe(); +#endif +#ifdef HAVE_EXECT + call_exect(); +#endif +#ifdef HAVE_EXECL + call_execl(); +#endif +#ifdef HAVE_EXECLP + call_execlp(); +#endif +#ifdef HAVE_EXECLE + call_execle(); +#endif +#ifdef HAVE_POSIX_SPAWN + call_posix_spawn(); +#endif +#ifdef HAVE_POSIX_SPAWNP + call_posix_spawnp(); +#endif + expected_out_close(); + return 0; +} Index: tools/scan-build-py/tests/functional/cases/intercept/exec/run_exec_test.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exec/run_exec_test.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +# REQUIRES: preload +# RUN: cmake -B%T -H%S +# RUN: make -C %T +# RUN: intercept-build --cdb %T/result.json %T/exec -C %T -o expected.json +# RUN: cdb_diff %T/result.json %T/expected.json Index: tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_empty.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_empty.py @@ -0,0 +1,2 @@ +# XFAIL: * +# RUN: %{intercept-build} Index: tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_fail.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_fail.py @@ -0,0 +1,3 @@ +# XFAIL: * +# RUN: mkdir %T/exit_code_for_fail +# RUN: cd %T/exit_code_for_fail; %{intercept-build} false Index: tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_help.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_help.py @@ -0,0 +1 @@ +# RUN: %{intercept-build} --help Index: 
tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_success.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/exit_code_for_success.py @@ -0,0 +1,2 @@ +# RUN: mkdir %T/exit_code_for_success +# RUN: cd %T/exit_code_for_success; %{intercept-build} true Index: tools/scan-build-py/tests/functional/cases/intercept/flags_filtered.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/flags_filtered.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/flags_filtered +# RUN: cd %T/flags_filtered; %{intercept-build} --cdb result.json ./run.sh +# RUN: cd %T/flags_filtered; cdb_diff result.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# ├── lib.c +# └── main.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cat >> "${root_dir}/src/lib.c" << EOF +int foo() { return 2; } +EOF + +cat >> "${root_dir}/src/main.c" << EOF +int main() { return 0; } +EOF + + +# set up platform specific linker options +PREFIX="fooflag" +if [ $(uname | grep -i "darwin") ]; then + LD_FLAGS="-o lib${PREFIX}.dylib -dynamiclib -install_name @rpath/${PREFIX}" +else + LD_FLAGS="-o lib${PREFIX}.so -shared -Wl,-soname,${PREFIX}" +fi + + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +# set up unique names for this test + +cd src + +# non compilation calls shall not be in the result +"\$CC" -### -c main.c 2> /dev/null +"\$CC" -E -o "\$\$.i" main.c +"\$CC" -S -o "\$\$.asm" main.c +"\$CC" -c -o "\$\$.d" -M main.c +"\$CC" -c -o "\$\$.d" -MM main.c + +# preprocessor flags shall be filtered +"\$CC" -c -o one.o -fpic -MD -MT target -MF one.d lib.c +"\$CC" -c -o two.o -fpic -MMD -MQ target -MF two.d lib.c + +# 
linking shall not in the result +"\$CC" ${LD_FLAGS} one.o two.o + +# linker flags shall be filtered +"\$CC" -o "${PREFIX}_one" "-l${PREFIX}" -L. main.c +"\$CC" -o "${PREFIX}_two" -l "${PREFIX}" -L . main.c + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ + { + "command": "cc -c -o one.o -fpic lib.c", + "directory": "${root_dir}/src", + "file": "lib.c" + }, + { + "command": "cc -c -o two.o -fpic lib.c", + "directory": "${root_dir}/src", + "file": "lib.c" + }, + { + "command": "cc -c -o fooflag_one main.c", + "directory": "${root_dir}/src", + "file": "main.c" + }, + { + "command": "cc -c -o fooflag_two main.c", + "directory": "${root_dir}/src", + "file": "main.c" + } +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/multiple_source_single_command.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/multiple_source_single_command.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/multiple_source_build +# RUN: cd %T/multiple_source_build; %{intercept-build} --cdb result.json ./run.sh +# RUN: cd %T/multiple_source_build; cdb_diff result.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# ├── main.c +# ├── one.c +# └── two.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/one.c" +touch "${root_dir}/src/two.c" +cp "${test_input_dir}/main.c" "${root_dir}/src/main.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -Dver=1 src/one.c src/two.c src/main.c; + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -Dver=1 src/one.c", + "directory": "${root_dir}", + "file": "src/one.c" +} +, +{ + "command": "cc -c -Dver=1 src/two.c", + "directory": "${root_dir}", + "file": "src/two.c" +} +, +{ + "command": "cc -c -Dver=1 src/main.c", + "directory": "${root_dir}", + "file": "src/main.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/noisy_build.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/noisy_build.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/noisy_build +# RUN: cd %T/noisy_build; %{intercept-build} --cdb wrapper.json --override-compiler ./run.sh +# RUN: cd %T/noisy_build; cdb_diff wrapper.json expected.json +# +# when library preload disabled, it falls back to use compiler wrapper +# +# RUN: cd %T/noisy_build; %{intercept-build} --cdb preload.json ./run.sh +# RUN: cd %T/noisy_build; cdb_diff preload.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +echo "hi there \"people\"" +echo "hi again" + +"\$CC" -c -o src/empty.o -Dver=1 src/empty.c; +"\$CXX" -c -o src/empty.o -Dver=2 src/empty.c; + +bash -c "\ +mkdir -p ./this/that \ +touch ./this/that \ +rm -rf ./this" + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/parallel_build.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/parallel_build.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/parallel_build +# RUN: cd %T/parallel_build; %{intercept-build} --cdb wrapper.json --override-compiler ./run.sh +# RUN: cd %T/parallel_build; cdb_diff wrapper.json expected.json +# +# when library preload disabled, it falls back to use compiler wrapper +# +# RUN: cd %T/parallel_build; %{intercept-build} --cdb preload.json ./run.sh +# RUN: cd %T/parallel_build; cdb_diff preload.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c -o src/empty.o -Dver=1 src/empty.c & +"\$CXX" -c -o src/empty.o -Dver=2 src/empty.c & + +cd src + +"\$CC" -c -o empty.o -Dver=3 empty.c & +"\$CXX" -c -o empty.o -Dver=4 empty.c & + +wait + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "cc -c -o empty.o -Dver=3 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +, +{ + "command": "c++ -c -o empty.o -Dver=4 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/successful_build.bat =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/successful_build.bat @@ -0,0 +1,53 @@ +: RUN: %s %T\successful_build +: RUN: cd %T\successful_build; intercept-build -vvv --override-compiler --cdb wrapper.json run.bat +: RUN: cd %T\successful_build; cdb_diff wrapper.json expected.json + +set root_dir=%1 + +mkdir "%root_dir%" +mkdir "%root_dir%\src" + +copy /y nul "%root_dir%\src\empty.c" + +echo ^ +%%CC%% -c -o src\empty.o -Dver=1 src\empty.c ^ + +^ + +%%CXX%% -c -o src\empty.o -Dver=2 src\empty.c ^ + +^ + +cd src ^ + +^ + +%%CC%% -c -o empty.o -Dver=3 empty.c ^ + +^ + +%%CXX%% -c -o empty.o -Dver=4 empty.c ^ + +> "%root_dir%\run.bat" + +set output="%root_dir%\expected.json" +del /f %output% + +cd "%root_dir%" +cdb_expect ^ + --cdb %output% ^ + --command "cc -c -o src\empty.o -Dver=1 
src\empty.c" ^ + --file "src\empty.c" +cdb_expect ^ + --cdb %output% ^ + --command "c++ -c -o src\empty.o -Dver=2 src\empty.c" ^ + --file "src\empty.c" +cd src +cdb_expect ^ + --cdb %output% ^ + --command "cc -c -o empty.o -Dver=3 empty.c" ^ + --file "empty.c" +cdb_expect ^ + --cdb %output% ^ + --command "c++ -c -o empty.o -Dver=4 empty.c" ^ + --file "empty.c" Index: tools/scan-build-py/tests/functional/cases/intercept/successful_build.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/successful_build.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/successful_build +# RUN: cd %T/successful_build; %{intercept-build} --cdb wrapper.json --override-compiler ./run.sh +# RUN: cd %T/successful_build; cdb_diff wrapper.json expected.json +# +# when library preload disabled, it falls back to use compiler wrapper +# +# RUN: cd %T/successful_build; %{intercept-build} --cdb preload.json ./run.sh +# RUN: cd %T/successful_build; cdb_diff preload.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── expected.json +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c -o src/empty.o -Dver=1 src/empty.c; +"\$CXX" -c -o src/empty.o -Dver=2 src/empty.c; + +cd src +"\$CC" -c -o empty.o -Dver=3 empty.c; +"\$CXX" -c -o empty.o -Dver=4 empty.c; + +true; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "cc -c -o empty.o -Dver=3 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +, +{ + "command": "c++ -c -o empty.o -Dver=4 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/wrapper/compiler_name_from_argument.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/wrapper/compiler_name_from_argument.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T +# RUN: cd %T; %{intercept-wrapped-build} --cdb wrapper.json --override-compiler ./run.sh +# RUN: cd %T; cdb_diff wrapper.json expected.json +# +# when library preload disabled, it falls back to use compiler wrapper +# +# RUN: cd %T; %{intercept-wrapped-build} --cdb preload.json ./run.sh +# RUN: cd %T; cdb_diff preload.json expected.json + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── wrapper +# ├── wrapper++ +# ├── run.sh +# ├── expected.json +# └── src +# └── empty.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +touch "${root_dir}/src/empty.c" + +wrapper_file="${root_dir}/wrapper" +cat >> ${wrapper_file} << EOF +#!/usr/bin/env bash + +set -o xtrace + +${REAL_CC} \$@ +EOF +chmod +x ${wrapper_file} + +wrapperxx_file="${root_dir}/wrapper++" +cat >> ${wrapperxx_file} << EOF +#!/usr/bin/env bash + +set -o xtrace + +${REAL_CXX} \$@ +EOF +chmod +x ${wrapperxx_file} + +build_file="${root_dir}/run.sh" +cat >> ${build_file} << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +"\$CC" -c -o src/empty.o -Dver=1 src/empty.c; +"\$CXX" -c -o src/empty.o -Dver=2 src/empty.c; + +cd src +"\$CC" -c -o empty.o -Dver=3 empty.c; +"\$CXX" -c -o empty.o -Dver=4 empty.c; +EOF +chmod +x ${build_file} + +cat >> "${root_dir}/expected.json" << EOF +[ +{ + "command": "cc -c -o src/empty.o -Dver=1 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "c++ -c -o src/empty.o -Dver=2 src/empty.c", + "directory": "${root_dir}", + "file": "src/empty.c" +} +, +{ + "command": "cc -c -o empty.o -Dver=3 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +, +{ + "command": "c++ -c -o empty.o -Dver=4 empty.c", + "directory": "${root_dir}/src", + "file": "empty.c" +} +] +EOF Index: tools/scan-build-py/tests/functional/cases/intercept/wrapper/lit.local.cfg =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/intercept/wrapper/lit.local.cfg @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+ +import os.path +import lit.util + +this_dir = os.path.dirname(__file__) + +wrapper = os.path.join(this_dir, 'Output', 'wrapper') +wrapperpp = os.path.join(this_dir, 'Output', 'wrapper++') + +config.substitutions.append( + ('%{intercept-wrapped-build}', + 'intercept-build --use-cc={0} --use-c++={1} -vvvv'.format(wrapper, wrapperpp))) + +# use compiler wrapper +config.environment['REAL_CC'] = config.environment['CC'] +config.environment['REAL_CXX'] = config.environment['CXX'] +config.environment['CC'] = wrapper +config.environment['CXX'] = wrapperpp Index: tools/scan-build-py/tests/functional/cases/report/report_dir_empty_cleaned.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/report/report_dir_empty_cleaned.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# RUN: mkdir %T/output_dir_clean_when_empty +# RUN: cd %T/output_dir_clean_when_empty; %{scan-build} --output . true | bash %s +# RUN: cd %T/output_dir_clean_when_empty; %{scan-build} --output . --status-bugs true | bash %s +# RUN: cd %T/output_dir_clean_when_empty; %{scan-build} --output . --status-bugs false | bash %s +# RUN: cd %T/output_dir_clean_when_empty; %{scan-build} --output . --status-bugs --plist true | bash %s +# RUN: cd %T/output_dir_clean_when_empty; %{scan-build} --output . --status-bugs --plist false | bash %s + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ -d "$out_dir" ] +then + echo "output directory should not exists" + false +fi Index: tools/scan-build-py/tests/functional/cases/report/report_dir_empty_kept.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/report/report_dir_empty_kept.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# RUN: mkdir %T/output_dir_kept_when_empty +# RUN: cd %T/output_dir_kept_when_empty; %{scan-build} --keep-empty --output . 
true | bash %s +# RUN: cd %T/output_dir_kept_when_empty; %{scan-build} --keep-empty --output . --status-bugs true | bash %s +# RUN: cd %T/output_dir_kept_when_empty; %{scan-build} --keep-empty --output . --status-bugs false | bash %s +# RUN: cd %T/output_dir_kept_when_empty; %{scan-build} --keep-empty --output . --status-bugs --plist true | bash %s +# RUN: cd %T/output_dir_kept_when_empty; %{scan-build} --keep-empty --output . --status-bugs --plist false | bash %s + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ ! -d "$out_dir" ] +then + echo "output directory should exists" + false +fi Index: tools/scan-build-py/tests/functional/cases/report/report_dir_kept.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/report/report_dir_kept.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/output_dir_kept +# RUN: cd %T/output_dir_kept; %{scan-build} --output . ./run.sh | ./check.sh +# RUN: cd %T/output_dir_kept; %{scan-build} --output . --plist ./run.sh | ./check.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── run.sh +# ├── check.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +build_file="${root_dir}/run.sh" +cat >> "${build_file}" << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +\${CC} -c -o src/broken.o src/broken.c +true +EOF +chmod +x "${build_file}" + +checker_file="${root_dir}/check.sh" +cat >> "${checker_file}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ ! 
-d "\$out_dir" ] +then + echo "output directory should exists" + false +fi +EOF +chmod +x "${checker_file}" Index: tools/scan-build-py/tests/functional/cases/report/report_failures.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/report/report_failures.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/report_failures +# RUN: cd %T/report_failures; %{analyze-build} --output . --keep-empty --cdb input.json | ./check_exists.sh +# RUN: cd %T/report_failures; %{analyze-build} --no-failure-reports --output . --keep-empty --cdb input.json | ./check_not_exists.sh +# +# RUN: cd %T/report_failures; %{analyze-build} --output . --keep-empty --plist-html --cdb input.json | ./check_exists.sh +# RUN: cd %T/report_failures; %{analyze-build} --no-failure-reports --output . --keep-empty --plist-html --cdb input.json | ./check_not_exists.sh +# +# RUN: cd %T/report_failures; %{analyze-build} --output . --keep-empty --plist --cdb input.json | ./check_exists.sh +# RUN: cd %T/report_failures; %{analyze-build} --no-failure-reports --output . --keep-empty --plist --cdb input.json | ./check_not_exists.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. +# +# ${root_dir} +# ├── input.json +# ├── check_exists.sh +# ├── check_not_exists.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/compile_error.c" "${root_dir}/src/broken.c" + +cat >> "${root_dir}/input.json" << EOF +[ + { + "directory": "${root_dir}", + "file": "${root_dir}/src/broken.c", + "command": "cc -c -o src/broken.o src/broken.c" + } +] +EOF + +check_one="${root_dir}/check_exists.sh" +cat >> "${check_one}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ ! 
-d "\$out_dir" ] +then + echo "output directory should exists" + false +else + if [ ! -d "\$out_dir/failures" ] + then + echo "failure directory should exists" + false + fi +fi +EOF +chmod +x "${check_one}" + +check_two="${root_dir}/check_not_exists.sh" +cat >> "${check_two}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ ! -d "\$out_dir" ] +then + echo "output directory should exists" + false +else + if [ -d "\$out_dir/failures" ] + then + echo "failure directory should not exists" + false + fi +fi +EOF +chmod +x "${check_two}" Index: tools/scan-build-py/tests/functional/cases/report/report_file_format.sh =================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/cases/report/report_file_format.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# RUN: bash %s %T/report_file_format +# RUN: cd %T/report_file_format; %{scan-build} --output . --keep-empty ./run.sh | ./check_html.sh +# RUN: cd %T/report_file_format; %{scan-build} --output . --keep-empty --plist ./run.sh | ./check_plist.sh +# RUN: cd %T/report_file_format; %{scan-build} --output . --keep-empty --plist-html ./run.sh | ./check_html.sh +# RUN: cd %T/report_file_format; %{scan-build} --output . --keep-empty --plist-html ./run.sh | ./check_plist.sh + +set -o errexit +set -o nounset +set -o xtrace + +# the test creates a subdirectory inside output dir. 
+# +# ${root_dir} +# ├── run.sh +# ├── check_plist.sh +# ├── check_html.sh +# └── src +# └── broken.c + +root_dir=$1 +mkdir -p "${root_dir}/src" + +cp "${test_input_dir}/div_zero.c" "${root_dir}/src/broken.c" + +build_file="${root_dir}/run.sh" +cat >> "${build_file}" << EOF +#!/usr/bin/env bash + +set -o nounset +set -o xtrace + +\${CC} -c -o src/broken.o src/broken.c +true +EOF +chmod +x "${build_file}" + +check_one="${root_dir}/check_plist.sh" +cat >> "${check_one}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ -d "\$out_dir" ] +then + ls \$out_dir/*.plist +else + echo "output directory should exists" + false +fi +EOF +chmod +x "${check_one}" + +check_two="${root_dir}/check_html.sh" +cat >> "${check_two}" << EOF +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o xtrace + +out_dir=\$(sed -n 's/\(.*\) Report directory created: \(.*\)/\2/p') +if [ -d "\$out_dir" ] +then + ls "\$out_dir/index.html" + ls \$out_dir/report-*.html +else + echo "output directory should exists" + false +fi +EOF +chmod +x "${check_two}" Index: tools/scan-build-py/tests/functional/cases/test_create_cdb.py =================================================================== --- tools/scan-build-py/tests/functional/cases/test_create_cdb.py +++ /dev/null @@ -1,191 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -import libear -from . 
import make_args, silent_check_call, silent_call, create_empty_file -import unittest - -import os.path -import json - - -class CompilationDatabaseTest(unittest.TestCase): - @staticmethod - def run_intercept(tmpdir, args): - result = os.path.join(tmpdir, 'cdb.json') - make = make_args(tmpdir) + args - silent_check_call( - ['intercept-build', '--cdb', result] + make) - return result - - @staticmethod - def count_entries(filename): - with open(filename, 'r') as handler: - content = json.load(handler) - return len(content) - - def test_successful_build(self): - with libear.TemporaryDirectory() as tmpdir: - result = self.run_intercept(tmpdir, ['build_regular']) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(5, self.count_entries(result)) - - def test_successful_build_with_wrapper(self): - with libear.TemporaryDirectory() as tmpdir: - result = os.path.join(tmpdir, 'cdb.json') - make = make_args(tmpdir) + ['build_regular'] - silent_check_call(['intercept-build', '--cdb', result, - '--override-compiler'] + make) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(5, self.count_entries(result)) - - @unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu make return -11') - def test_successful_build_parallel(self): - with libear.TemporaryDirectory() as tmpdir: - result = self.run_intercept(tmpdir, ['-j', '4', 'build_regular']) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(5, self.count_entries(result)) - - @unittest.skipIf(os.getenv('TRAVIS'), 'ubuntu env remove clang from path') - def test_successful_build_on_empty_env(self): - with libear.TemporaryDirectory() as tmpdir: - result = os.path.join(tmpdir, 'cdb.json') - make = make_args(tmpdir) + ['CC=clang', 'build_regular'] - silent_check_call(['intercept-build', '--cdb', result, - 'env', '-'] + make) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(5, self.count_entries(result)) - - def test_successful_build_all_in_one(self): - with libear.TemporaryDirectory() as tmpdir: - result = 
self.run_intercept(tmpdir, ['build_all_in_one']) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(5, self.count_entries(result)) - - def test_not_successful_build(self): - with libear.TemporaryDirectory() as tmpdir: - result = os.path.join(tmpdir, 'cdb.json') - make = make_args(tmpdir) + ['build_broken'] - silent_call( - ['intercept-build', '--cdb', result] + make) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(2, self.count_entries(result)) - - -class ExitCodeTest(unittest.TestCase): - @staticmethod - def run_intercept(tmpdir, target): - result = os.path.join(tmpdir, 'cdb.json') - make = make_args(tmpdir) + [target] - return silent_call( - ['intercept-build', '--cdb', result] + make) - - def test_successful_build(self): - with libear.TemporaryDirectory() as tmpdir: - exitcode = self.run_intercept(tmpdir, 'build_clean') - self.assertFalse(exitcode) - - def test_not_successful_build(self): - with libear.TemporaryDirectory() as tmpdir: - exitcode = self.run_intercept(tmpdir, 'build_broken') - self.assertTrue(exitcode) - - -class ResumeFeatureTest(unittest.TestCase): - @staticmethod - def run_intercept(tmpdir, target, args): - result = os.path.join(tmpdir, 'cdb.json') - make = make_args(tmpdir) + [target] - silent_check_call( - ['intercept-build', '--cdb', result] + args + make) - return result - - @staticmethod - def count_entries(filename): - with open(filename, 'r') as handler: - content = json.load(handler) - return len(content) - - def test_overwrite_existing_cdb(self): - with libear.TemporaryDirectory() as tmpdir: - result = self.run_intercept(tmpdir, 'build_clean', []) - self.assertTrue(os.path.isfile(result)) - result = self.run_intercept(tmpdir, 'build_regular', []) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(2, self.count_entries(result)) - - def test_append_to_existing_cdb(self): - with libear.TemporaryDirectory() as tmpdir: - result = self.run_intercept(tmpdir, 'build_clean', []) - 
self.assertTrue(os.path.isfile(result)) - result = self.run_intercept(tmpdir, 'build_regular', ['--append']) - self.assertTrue(os.path.isfile(result)) - self.assertEqual(5, self.count_entries(result)) - - -class ResultFormatingTest(unittest.TestCase): - @staticmethod - def run_intercept(tmpdir, command): - result = os.path.join(tmpdir, 'cdb.json') - silent_check_call( - ['intercept-build', '--cdb', result] + command, - cwd=tmpdir) - with open(result, 'r') as handler: - content = json.load(handler) - return content - - def assert_creates_number_of_entries(self, command, count): - with libear.TemporaryDirectory() as tmpdir: - filename = os.path.join(tmpdir, 'test.c') - create_empty_file(filename) - command.append(filename) - cmd = ['sh', '-c', ' '.join(command)] - cdb = self.run_intercept(tmpdir, cmd) - self.assertEqual(count, len(cdb)) - - def test_filter_preprocessor_only_calls(self): - self.assert_creates_number_of_entries(['cc', '-c'], 1) - self.assert_creates_number_of_entries(['cc', '-c', '-E'], 0) - self.assert_creates_number_of_entries(['cc', '-c', '-M'], 0) - self.assert_creates_number_of_entries(['cc', '-c', '-MM'], 0) - - def assert_command_creates_entry(self, command, expected): - with libear.TemporaryDirectory() as tmpdir: - filename = os.path.join(tmpdir, command[-1]) - create_empty_file(filename) - cmd = ['sh', '-c', ' '.join(command)] - cdb = self.run_intercept(tmpdir, cmd) - self.assertEqual(' '.join(expected), cdb[0]['command']) - - def test_filter_preprocessor_flags(self): - self.assert_command_creates_entry( - ['cc', '-c', '-MD', 'test.c'], - ['cc', '-c', 'test.c']) - self.assert_command_creates_entry( - ['cc', '-c', '-MMD', 'test.c'], - ['cc', '-c', 'test.c']) - self.assert_command_creates_entry( - ['cc', '-c', '-MD', '-MF', 'test.d', 'test.c'], - ['cc', '-c', 'test.c']) - - def test_pass_language_flag(self): - self.assert_command_creates_entry( - ['cc', '-c', '-x', 'c', 'test.c'], - ['cc', '-c', '-x', 'c', 'test.c']) - 
self.assert_command_creates_entry( - ['cc', '-c', 'test.c'], - ['cc', '-c', 'test.c']) - - def test_pass_arch_flags(self): - self.assert_command_creates_entry( - ['clang', '-c', 'test.c'], - ['cc', '-c', 'test.c']) - self.assert_command_creates_entry( - ['clang', '-c', '-arch', 'i386', 'test.c'], - ['cc', '-c', '-arch', 'i386', 'test.c']) - self.assert_command_creates_entry( - ['clang', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c'], - ['cc', '-c', '-arch', 'i386', '-arch', 'armv7l', 'test.c']) Index: tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py =================================================================== --- tools/scan-build-py/tests/functional/cases/test_exec_anatomy.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -import libear -import unittest - -import os.path -import subprocess -import json - - -def run(source_dir, target_dir): - def execute(cmd): - return subprocess.check_call(cmd, - cwd=target_dir, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - - execute(['cmake', source_dir]) - execute(['make']) - - result_file = os.path.join(target_dir, 'result.json') - expected_file = os.path.join(target_dir, 'expected.json') - execute(['intercept-build', '--cdb', result_file, './exec', - expected_file]) - return (expected_file, result_file) - - -class ExecAnatomyTest(unittest.TestCase): - def assertEqualJson(self, expected, result): - def read_json(filename): - with open(filename) as handler: - return json.load(handler) - - lhs = read_json(expected) - rhs = read_json(result) - for item in lhs: - self.assertTrue(rhs.count(item)) - for item in rhs: - self.assertTrue(lhs.count(item)) - - def test_all_exec_calls(self): - this_dir, _ = os.path.split(__file__) - source_dir = os.path.normpath(os.path.join(this_dir, '..', 'exec')) - with libear.TemporaryDirectory() as 
tmp_dir: - expected, result = run(source_dir, tmp_dir) - self.assertEqualJson(expected, result) Index: tools/scan-build-py/tests/functional/cases/test_from_cdb.py =================================================================== --- tools/scan-build-py/tests/functional/cases/test_from_cdb.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -import libear -from . import call_and_report -import unittest - -import os.path -import string -import glob - - -def prepare_cdb(name, target_dir): - target_file = 'build_{0}.json'.format(name) - this_dir, _ = os.path.split(__file__) - path = os.path.normpath(os.path.join(this_dir, '..', 'src')) - source_dir = os.path.join(path, 'compilation_database') - source_file = os.path.join(source_dir, target_file + '.in') - target_file = os.path.join(target_dir, 'compile_commands.json') - with open(source_file, 'r') as in_handle: - with open(target_file, 'w') as out_handle: - for line in in_handle: - temp = string.Template(line) - out_handle.write(temp.substitute(path=path)) - return target_file - - -def run_analyzer(directory, cdb, args): - cmd = ['analyze-build', '--cdb', cdb, '--output', directory] \ - + args - return call_and_report(cmd, []) - - -class OutputDirectoryTest(unittest.TestCase): - def test_regular_keeps_report_dir(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, reportdir = run_analyzer(tmpdir, cdb, []) - self.assertTrue(os.path.isdir(reportdir)) - - def test_clear_deletes_report_dir(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('clean', tmpdir) - exit_code, reportdir = run_analyzer(tmpdir, cdb, []) - self.assertFalse(os.path.isdir(reportdir)) - - def test_clear_keeps_report_dir_when_asked(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('clean', tmpdir) 
- exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--keep-empty']) - self.assertTrue(os.path.isdir(reportdir)) - - -class ExitCodeTest(unittest.TestCase): - def test_regular_does_not_set_exit_code(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, __ = run_analyzer(tmpdir, cdb, []) - self.assertFalse(exit_code) - - def test_clear_does_not_set_exit_code(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('clean', tmpdir) - exit_code, __ = run_analyzer(tmpdir, cdb, []) - self.assertFalse(exit_code) - - def test_regular_sets_exit_code_if_asked(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, __ = run_analyzer(tmpdir, cdb, ['--status-bugs']) - self.assertTrue(exit_code) - - def test_clear_does_not_set_exit_code_if_asked(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('clean', tmpdir) - exit_code, __ = run_analyzer(tmpdir, cdb, ['--status-bugs']) - self.assertFalse(exit_code) - - def test_regular_sets_exit_code_if_asked_from_plist(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, __ = run_analyzer( - tmpdir, cdb, ['--status-bugs', '--plist']) - self.assertTrue(exit_code) - - def test_clear_does_not_set_exit_code_if_asked_from_plist(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('clean', tmpdir) - exit_code, __ = run_analyzer( - tmpdir, cdb, ['--status-bugs', '--plist']) - self.assertFalse(exit_code) - - -class OutputFormatTest(unittest.TestCase): - @staticmethod - def get_html_count(directory): - return len(glob.glob(os.path.join(directory, 'report-*.html'))) - - @staticmethod - def get_plist_count(directory): - return len(glob.glob(os.path.join(directory, 'report-*.plist'))) - - def test_default_creates_html_report(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, reportdir = 
run_analyzer(tmpdir, cdb, []) - self.assertTrue( - os.path.exists(os.path.join(reportdir, 'index.html'))) - self.assertEqual(self.get_html_count(reportdir), 2) - self.assertEqual(self.get_plist_count(reportdir), 0) - - def test_plist_and_html_creates_html_report(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--plist-html']) - self.assertTrue( - os.path.exists(os.path.join(reportdir, 'index.html'))) - self.assertEqual(self.get_html_count(reportdir), 2) - self.assertEqual(self.get_plist_count(reportdir), 5) - - def test_plist_does_not_creates_html_report(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('regular', tmpdir) - exit_code, reportdir = run_analyzer(tmpdir, cdb, ['--plist']) - self.assertFalse( - os.path.exists(os.path.join(reportdir, 'index.html'))) - self.assertEqual(self.get_html_count(reportdir), 0) - self.assertEqual(self.get_plist_count(reportdir), 5) - - -class FailureReportTest(unittest.TestCase): - def test_broken_creates_failure_reports(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('broken', tmpdir) - exit_code, reportdir = run_analyzer(tmpdir, cdb, []) - self.assertTrue( - os.path.isdir(os.path.join(reportdir, 'failures'))) - - def test_broken_does_not_creates_failure_reports(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('broken', tmpdir) - exit_code, reportdir = run_analyzer( - tmpdir, cdb, ['--no-failure-reports']) - self.assertFalse( - os.path.isdir(os.path.join(reportdir, 'failures'))) - - -class TitleTest(unittest.TestCase): - def assertTitleEqual(self, directory, expected): - import re - patterns = [ - re.compile(r'<title>(?P<page>.*)'), - re.compile(r'

(?P.*)

') - ] - result = dict() - - index = os.path.join(directory, 'index.html') - with open(index, 'r') as handler: - for line in handler.readlines(): - for regex in patterns: - match = regex.match(line.strip()) - if match: - result.update(match.groupdict()) - break - self.assertEqual(result['page'], result['head']) - self.assertEqual(result['page'], expected) - - def test_default_title_in_report(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('broken', tmpdir) - exit_code, reportdir = run_analyzer(tmpdir, cdb, []) - self.assertTitleEqual(reportdir, 'src - analyzer results') - - def test_given_title_in_report(self): - with libear.TemporaryDirectory() as tmpdir: - cdb = prepare_cdb('broken', tmpdir) - exit_code, reportdir = run_analyzer( - tmpdir, cdb, ['--html-title', 'this is the title']) - self.assertTitleEqual(reportdir, 'this is the title') Index: tools/scan-build-py/tests/functional/cases/test_from_cmd.py =================================================================== --- tools/scan-build-py/tests/functional/cases/test_from_cmd.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -import libear -from . 
import make_args, check_call_and_report, create_empty_file -import unittest - -import os -import os.path -import glob - - -class OutputDirectoryTest(unittest.TestCase): - - @staticmethod - def run_analyzer(outdir, args, cmd): - return check_call_and_report( - ['scan-build', '--intercept-first', '-o', outdir] + args, - cmd) - - def test_regular_keeps_report_dir(self): - with libear.TemporaryDirectory() as tmpdir: - make = make_args(tmpdir) + ['build_regular'] - outdir = self.run_analyzer(tmpdir, [], make) - self.assertTrue(os.path.isdir(outdir)) - - def test_clear_deletes_report_dir(self): - with libear.TemporaryDirectory() as tmpdir: - make = make_args(tmpdir) + ['build_clean'] - outdir = self.run_analyzer(tmpdir, [], make) - self.assertFalse(os.path.isdir(outdir)) - - def test_clear_keeps_report_dir_when_asked(self): - with libear.TemporaryDirectory() as tmpdir: - make = make_args(tmpdir) + ['build_clean'] - outdir = self.run_analyzer(tmpdir, ['--keep-empty'], make) - self.assertTrue(os.path.isdir(outdir)) - - -class RunAnalyzerTest(unittest.TestCase): - - @staticmethod - def get_plist_count(directory): - return len(glob.glob(os.path.join(directory, 'report-*.plist'))) - - def test_interposition_works(self): - with libear.TemporaryDirectory() as tmpdir: - make = make_args(tmpdir) + ['build_regular'] - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--override-compiler'], - make) - - self.assertTrue(os.path.isdir(outdir)) - self.assertEqual(self.get_plist_count(outdir), 5) - - def test_intercept_wrapper_works(self): - with libear.TemporaryDirectory() as tmpdir: - make = make_args(tmpdir) + ['build_regular'] - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--intercept-first', - '--override-compiler'], - make) - - self.assertTrue(os.path.isdir(outdir)) - self.assertEqual(self.get_plist_count(outdir), 5) - - def test_intercept_library_works(self): - with libear.TemporaryDirectory() as tmpdir: - make = 
make_args(tmpdir) + ['build_regular'] - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--intercept-first'], - make) - - self.assertTrue(os.path.isdir(outdir)) - self.assertEqual(self.get_plist_count(outdir), 5) - - @staticmethod - def compile_empty_source_file(target_dir, is_cxx): - compiler = '$CXX' if is_cxx else '$CC' - src_file_name = 'test.cxx' if is_cxx else 'test.c' - src_file = os.path.join(target_dir, src_file_name) - obj_file = os.path.join(target_dir, 'test.o') - create_empty_file(src_file) - command = ' '.join([compiler, '-c', src_file, '-o', obj_file]) - return ['sh', '-c', command] - - def test_interposition_cc_works(self): - with libear.TemporaryDirectory() as tmpdir: - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--override-compiler'], - self.compile_empty_source_file(tmpdir, False)) - self.assertEqual(self.get_plist_count(outdir), 1) - - def test_interposition_cxx_works(self): - with libear.TemporaryDirectory() as tmpdir: - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--override-compiler'], - self.compile_empty_source_file(tmpdir, True)) - self.assertEqual(self.get_plist_count(outdir), 1) - - def test_intercept_cc_works(self): - with libear.TemporaryDirectory() as tmpdir: - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--override-compiler', - '--intercept-first'], - self.compile_empty_source_file(tmpdir, False)) - self.assertEqual(self.get_plist_count(outdir), 1) - - def test_intercept_cxx_works(self): - with libear.TemporaryDirectory() as tmpdir: - outdir = check_call_and_report( - ['scan-build', '--plist', '-o', tmpdir, '--override-compiler', - '--intercept-first'], - self.compile_empty_source_file(tmpdir, True)) - self.assertEqual(self.get_plist_count(outdir), 1) Index: tools/scan-build-py/tests/functional/exec/CMakeLists.txt =================================================================== --- 
tools/scan-build-py/tests/functional/exec/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -project(exec C) - -cmake_minimum_required(VERSION 3.4.3) - -include(CheckCCompilerFlag) -check_c_compiler_flag("-std=c99" C99_SUPPORTED) -if (C99_SUPPORTED) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99") -endif() - -include(CheckFunctionExists) -include(CheckSymbolExists) - -add_definitions(-D_GNU_SOURCE) -list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) - -check_function_exists(execve HAVE_EXECVE) -check_function_exists(execv HAVE_EXECV) -check_function_exists(execvpe HAVE_EXECVPE) -check_function_exists(execvp HAVE_EXECVP) -check_function_exists(execvP HAVE_EXECVP2) -check_function_exists(exect HAVE_EXECT) -check_function_exists(execl HAVE_EXECL) -check_function_exists(execlp HAVE_EXECLP) -check_function_exists(execle HAVE_EXECLE) -check_function_exists(posix_spawn HAVE_POSIX_SPAWN) -check_function_exists(posix_spawnp HAVE_POSIX_SPAWNP) - -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) -include_directories(${CMAKE_CURRENT_BINARY_DIR}) - -add_executable(exec main.c) Index: tools/scan-build-py/tests/functional/exec/config.h.in =================================================================== --- tools/scan-build-py/tests/functional/exec/config.h.in +++ /dev/null @@ -1,20 +0,0 @@ -/* -*- coding: utf-8 -*- -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-*/ - -#pragma once - -#cmakedefine HAVE_EXECVE -#cmakedefine HAVE_EXECV -#cmakedefine HAVE_EXECVPE -#cmakedefine HAVE_EXECVP -#cmakedefine HAVE_EXECVP2 -#cmakedefine HAVE_EXECT -#cmakedefine HAVE_EXECL -#cmakedefine HAVE_EXECLP -#cmakedefine HAVE_EXECLE -#cmakedefine HAVE_POSIX_SPAWN -#cmakedefine HAVE_POSIX_SPAWNP Index: tools/scan-build-py/tests/functional/exec/main.c =================================================================== --- tools/scan-build-py/tests/functional/exec/main.c +++ /dev/null @@ -1,307 +0,0 @@ -/* -*- coding: utf-8 -*- -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -*/ - -#include "config.h" - -#include -#include -#include -#include -#include - -#if defined HAVE_POSIX_SPAWN || defined HAVE_POSIX_SPAWNP -#include -#endif - -// ..:: environment access fixer - begin ::.. -#ifdef HAVE_NSGETENVIRON -#include -#else -extern char **environ; -#endif - -char **get_environ() { -#ifdef HAVE_NSGETENVIRON - return *_NSGetEnviron(); -#else - return environ; -#endif -} -// ..:: environment access fixer - end ::.. - -// ..:: test fixtures - begin ::.. 
-static char const *cwd = NULL; -static FILE *fd = NULL; -static int need_comma = 0; - -void expected_out_open(const char *expected) { - cwd = getcwd(NULL, 0); - fd = fopen(expected, "w"); - if (!fd) { - perror("fopen"); - exit(EXIT_FAILURE); - } - fprintf(fd, "[\n"); - need_comma = 0; -} - -void expected_out_close() { - fprintf(fd, "]\n"); - fclose(fd); - fd = NULL; - - free((void *)cwd); - cwd = NULL; -} - -void expected_out(const char *file) { - if (need_comma) - fprintf(fd, ",\n"); - else - need_comma = 1; - - fprintf(fd, "{\n"); - fprintf(fd, " \"directory\": \"%s\",\n", cwd); - fprintf(fd, " \"command\": \"cc -c %s\",\n", file); - fprintf(fd, " \"file\": \"%s/%s\"\n", cwd, file); - fprintf(fd, "}\n"); -} - -void create_source(char *file) { - FILE *fd = fopen(file, "w"); - if (!fd) { - perror("fopen"); - exit(EXIT_FAILURE); - } - fprintf(fd, "typedef int score;\n"); - fclose(fd); -} - -typedef void (*exec_fun)(); - -void wait_for(pid_t child) { - int status; - if (-1 == waitpid(child, &status, 0)) { - perror("wait"); - exit(EXIT_FAILURE); - } - if (WIFEXITED(status) ? WEXITSTATUS(status) : EXIT_FAILURE) { - fprintf(stderr, "children process has non zero exit code\n"); - exit(EXIT_FAILURE); - } -} - -#define FORK(FUNC) \ - { \ - pid_t child = fork(); \ - if (-1 == child) { \ - perror("fork"); \ - exit(EXIT_FAILURE); \ - } else if (0 == child) { \ - FUNC fprintf(stderr, "children process failed to exec\n"); \ - exit(EXIT_FAILURE); \ - } else { \ - wait_for(child); \ - } \ - } -// ..:: test fixtures - end ::.. 
- -#ifdef HAVE_EXECV -void call_execv() { - char *const file = "execv.c"; - char *const compiler = "/usr/bin/cc"; - char *const argv[] = {"cc", "-c", file, 0}; - - expected_out(file); - create_source(file); - - FORK(execv(compiler, argv);) -} -#endif - -#ifdef HAVE_EXECVE -void call_execve() { - char *const file = "execve.c"; - char *const compiler = "/usr/bin/cc"; - char *const argv[] = {compiler, "-c", file, 0}; - char *const envp[] = {"THIS=THAT", 0}; - - expected_out(file); - create_source(file); - - FORK(execve(compiler, argv, envp);) -} -#endif - -#ifdef HAVE_EXECVP -void call_execvp() { - char *const file = "execvp.c"; - char *const compiler = "cc"; - char *const argv[] = {compiler, "-c", file, 0}; - - expected_out(file); - create_source(file); - - FORK(execvp(compiler, argv);) -} -#endif - -#ifdef HAVE_EXECVP2 -void call_execvP() { - char *const file = "execv_p.c"; - char *const compiler = "cc"; - char *const argv[] = {compiler, "-c", file, 0}; - - expected_out(file); - create_source(file); - - FORK(execvP(compiler, _PATH_DEFPATH, argv);) -} -#endif - -#ifdef HAVE_EXECVPE -void call_execvpe() { - char *const file = "execvpe.c"; - char *const compiler = "cc"; - char *const argv[] = {"/usr/bin/cc", "-c", file, 0}; - char *const envp[] = {"THIS=THAT", 0}; - - expected_out(file); - create_source(file); - - FORK(execvpe(compiler, argv, envp);) -} -#endif - -#ifdef HAVE_EXECT -void call_exect() { - char *const file = "exect.c"; - char *const compiler = "/usr/bin/cc"; - char *const argv[] = {compiler, "-c", file, 0}; - char *const envp[] = {"THIS=THAT", 0}; - - expected_out(file); - create_source(file); - - FORK(exect(compiler, argv, envp);) -} -#endif - -#ifdef HAVE_EXECL -void call_execl() { - char *const file = "execl.c"; - char *const compiler = "/usr/bin/cc"; - - expected_out(file); - create_source(file); - - FORK(execl(compiler, "cc", "-c", file, (char *)0);) -} -#endif - -#ifdef HAVE_EXECLP -void call_execlp() { - char *const file = "execlp.c"; - char 
*const compiler = "cc"; - - expected_out(file); - create_source(file); - - FORK(execlp(compiler, compiler, "-c", file, (char *)0);) -} -#endif - -#ifdef HAVE_EXECLE -void call_execle() { - char *const file = "execle.c"; - char *const compiler = "/usr/bin/cc"; - char *const envp[] = {"THIS=THAT", 0}; - - expected_out(file); - create_source(file); - - FORK(execle(compiler, compiler, "-c", file, (char *)0, envp);) -} -#endif - -#ifdef HAVE_POSIX_SPAWN -void call_posix_spawn() { - char *const file = "posix_spawn.c"; - char *const compiler = "cc"; - char *const argv[] = {compiler, "-c", file, 0}; - - expected_out(file); - create_source(file); - - pid_t child; - if (0 != posix_spawn(&child, "/usr/bin/cc", 0, 0, argv, get_environ())) { - perror("posix_spawn"); - exit(EXIT_FAILURE); - } - wait_for(child); -} -#endif - -#ifdef HAVE_POSIX_SPAWNP -void call_posix_spawnp() { - char *const file = "posix_spawnp.c"; - char *const compiler = "cc"; - char *const argv[] = {compiler, "-c", file, 0}; - - expected_out(file); - create_source(file); - - pid_t child; - if (0 != posix_spawnp(&child, "cc", 0, 0, argv, get_environ())) { - perror("posix_spawnp"); - exit(EXIT_FAILURE); - } - wait_for(child); -} -#endif - -int main(int argc, char *const argv[]) { - if (argc != 2) - exit(EXIT_FAILURE); - - expected_out_open(argv[1]); -#ifdef HAVE_EXECV - call_execv(); -#endif -#ifdef HAVE_EXECVE - call_execve(); -#endif -#ifdef HAVE_EXECVP - call_execvp(); -#endif -#ifdef HAVE_EXECVP2 - call_execvP(); -#endif -#ifdef HAVE_EXECVPE - call_execvpe(); -#endif -#ifdef HAVE_EXECT - call_exect(); -#endif -#ifdef HAVE_EXECL - call_execl(); -#endif -#ifdef HAVE_EXECLP - call_execlp(); -#endif -#ifdef HAVE_EXECLE - call_execle(); -#endif -#ifdef HAVE_POSIX_SPAWN - call_posix_spawn(); -#endif -#ifdef HAVE_POSIX_SPAWNP - call_posix_spawnp(); -#endif - expected_out_close(); - return 0; -} Index: tools/scan-build-py/tests/functional/lit.local.cfg 
=================================================================== --- /dev/null +++ tools/scan-build-py/tests/functional/lit.local.cfg @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. + +import os +import os.path +import lit.util + +# test_source_root: The root path where tests are located. +this_dir = os.path.dirname(__file__) + +config.environment['test_input_dir'] = os.path.join(this_dir, 'Input') + +# this hack is needed to run the right compiler on travis-ci +clang=os.environ.get('TRAVIS_CLANG', 'clang') +clangpp=os.environ.get('TRAVIS_CLANGPP', 'clang++') + +config.environment['CC'] = clang +config.environment['CXX'] = clangpp + +# this puts the executables into the path +bin_dir = os.path.join(config.environment['project_dir'], 'bin') +tools_dir = os.path.join(config.test_exec_root, 'tools') +current_path = config.environment['PATH'] +config.environment['PATH'] = os.pathsep.join([bin_dir, tools_dir, current_path]) + +config.substitutions.append( + ('%{scan-build}', + 'scan-build --use-analyzer={0} --use-cc={0} --use-c++={1} -vvvv'.format(clang, clangpp))) +config.substitutions.append( + ('%{analyze-build}', + 'analyze-build --use-analyzer={0} -vvvv'.format(clang))) +config.substitutions.append( + ('%{intercept-build}', + 'intercept-build --use-cc={0} --use-c++={1} -vvvv'.format(clang, clangpp))) Index: tools/scan-build-py/tests/functional/src/broken-one.c =================================================================== --- tools/scan-build-py/tests/functional/src/broken-one.c +++ /dev/null @@ -1,6 +0,0 @@ -#include - -int value(int in) -{ - return 2 * in; -} Index: tools/scan-build-py/tests/functional/src/broken-two.c =================================================================== --- tools/scan-build-py/tests/functional/src/broken-two.c +++ /dev/null @@ -1 +0,0 @@ -int test() { ; Index: 
tools/scan-build-py/tests/functional/src/build/Makefile =================================================================== --- tools/scan-build-py/tests/functional/src/build/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -SRCDIR := .. -OBJDIR := . - -CFLAGS = -Wall -DDEBUG -Dvariable="value with space" -I $(SRCDIR)/include -LDFLAGS = -PROGRAM = $(OBJDIR)/prg - -$(OBJDIR)/main.o: $(SRCDIR)/main.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/main.c - -$(OBJDIR)/clean-one.o: $(SRCDIR)/clean-one.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/clean-one.c - -$(OBJDIR)/clean-two.o: $(SRCDIR)/clean-two.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/clean-two.c - -$(OBJDIR)/emit-one.o: $(SRCDIR)/emit-one.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/emit-one.c - -$(OBJDIR)/emit-two.o: $(SRCDIR)/emit-two.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/emit-two.c - -$(OBJDIR)/broken-one.o: $(SRCDIR)/broken-one.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/broken-one.c - -$(OBJDIR)/broken-two.o: $(SRCDIR)/broken-two.c - $(CC) $(CFLAGS) -c -o $@ $(SRCDIR)/broken-two.c - -$(PROGRAM): $(OBJDIR)/main.o $(OBJDIR)/clean-one.o $(OBJDIR)/clean-two.o $(OBJDIR)/emit-one.o $(OBJDIR)/emit-two.o - $(CC) $(LDFLAGS) -o $@ $(OBJDIR)/main.o $(OBJDIR)/clean-one.o $(OBJDIR)/clean-two.o $(OBJDIR)/emit-one.o $(OBJDIR)/emit-two.o - -build_regular: $(PROGRAM) - -build_clean: $(OBJDIR)/main.o $(OBJDIR)/clean-one.o $(OBJDIR)/clean-two.o - -build_broken: $(OBJDIR)/main.o $(OBJDIR)/broken-one.o $(OBJDIR)/broken-two.o - -build_all_in_one: $(SRCDIR)/main.c $(SRCDIR)/clean-one.c $(SRCDIR)/clean-two.c $(SRCDIR)/emit-one.c $(SRCDIR)/emit-two.c - $(CC) $(CFLAGS) $(LDFLAGS) -o $(PROGRAM) $(SRCDIR)/main.c $(SRCDIR)/clean-one.c $(SRCDIR)/clean-two.c $(SRCDIR)/emit-one.c $(SRCDIR)/emit-two.c - -clean: - rm -f $(PROGRAM) $(OBJDIR)/*.o Index: tools/scan-build-py/tests/functional/src/clean-one.c =================================================================== --- tools/scan-build-py/tests/functional/src/clean-one.c +++ /dev/null @@ -1,13 +0,0 @@ -#include - -int 
do_nothing_loop() -{ - int i = 32; - int idx = 0; - - for (idx = i; idx > 0; --idx) - { - i += idx; - } - return i; -} Index: tools/scan-build-py/tests/functional/src/clean-two.c =================================================================== --- tools/scan-build-py/tests/functional/src/clean-two.c +++ /dev/null @@ -1,11 +0,0 @@ -#include - -#include - -unsigned int another_method() -{ - unsigned int const size = do_nothing_loop(); - unsigned int const square = size * size; - - return square; -} Index: tools/scan-build-py/tests/functional/src/compilation_database/build_broken.json.in =================================================================== --- tools/scan-build-py/tests/functional/src/compilation_database/build_broken.json.in +++ /dev/null @@ -1,43 +0,0 @@ -[ -{ - "directory": "${path}", - "command": "g++ -c -o main.o main.c -Wall -DDEBUG -Dvariable=value", - "file": "${path}/main.c" -} -, -{ - "directory": "${path}", - "command": "cc -c -o broken-one.o broken-one.c -Wall -DDEBUG \"-Dvariable=value with space\"", - "file": "${path}/broken-one.c" -} -, -{ - "directory": "${path}", - "command": "g++ -c -o broken-two.o broken-two.c -Wall -DDEBUG -Dvariable=value", - "file": "${path}/broken-two.c" -} -, -{ - "directory": "${path}", - "command": "cc -c -o clean-one.o clean-one.c -Wall -DDEBUG \"-Dvariable=value with space\" -Iinclude", - "file": "${path}/clean-one.c" -} -, -{ - "directory": "${path}", - "command": "g++ -c -o clean-two.o clean-two.c -Wall -DDEBUG -Dvariable=value -I ./include", - "file": "${path}/clean-two.c" -} -, -{ - "directory": "${path}", - "command": "cc -c -o emit-one.o emit-one.c -Wall -DDEBUG \"-Dvariable=value with space\"", - "file": "${path}/emit-one.c" -} -, -{ - "directory": "${path}", - "command": "g++ -c -o emit-two.o emit-two.c -Wall -DDEBUG -Dvariable=value", - "file": "${path}/emit-two.c" -} -] Index: tools/scan-build-py/tests/functional/src/compilation_database/build_clean.json.in 
=================================================================== --- tools/scan-build-py/tests/functional/src/compilation_database/build_clean.json.in +++ /dev/null @@ -1,19 +0,0 @@ -[ -{ - "directory": "${path}", - "command": "g++ -c -o main.o main.c -Wall -DDEBUG -Dvariable=value", - "file": "${path}/main.c" -} -, -{ - "directory": "${path}", - "command": "cc -c -o clean-one.o clean-one.c -Wall -DDEBUG \"-Dvariable=value with space\" -Iinclude", - "file": "${path}/clean-one.c" -} -, -{ - "directory": "${path}", - "command": "g++ -c -o clean-two.o clean-two.c -Wall -DDEBUG -Dvariable=value -I ./include", - "file": "${path}/clean-two.c" -} -] Index: tools/scan-build-py/tests/functional/src/compilation_database/build_regular.json.in =================================================================== --- tools/scan-build-py/tests/functional/src/compilation_database/build_regular.json.in +++ /dev/null @@ -1,31 +0,0 @@ -[ -{ - "directory": "${path}", - "command": "g++ -c -o main.o main.c -Wall -DDEBUG -Dvariable=value", - "file": "${path}/main.c" -} -, -{ - "directory": "${path}", - "command": "cc -c -o clean-one.o clean-one.c -Wall -DDEBUG \"-Dvariable=value with space\" -Iinclude", - "file": "${path}/clean-one.c" -} -, -{ - "directory": "${path}", - "command": "g++ -c -o clean-two.o clean-two.c -Wall -DDEBUG -Dvariable=value -I ./include", - "file": "${path}/clean-two.c" -} -, -{ - "directory": "${path}", - "command": "cc -c -o emit-one.o emit-one.c -Wall -DDEBUG \"-Dvariable=value with space\"", - "file": "${path}/emit-one.c" -} -, -{ - "directory": "${path}", - "command": "g++ -c -o emit-two.o emit-two.c -Wall -DDEBUG -Dvariable=value", - "file": "${path}/emit-two.c" -} -] Index: tools/scan-build-py/tests/functional/src/emit-one.c =================================================================== --- tools/scan-build-py/tests/functional/src/emit-one.c +++ /dev/null @@ -1,23 +0,0 @@ -#include - -int div(int numerator, int denominator) -{ - return numerator / 
denominator; -} - -void div_test() -{ - int i = 0; - for (i = 0; i < 2; ++i) - assert(div(2 * i, i) == 2); -} - -int do_nothing() -{ - unsigned int i = 0; - - int k = 100; - int j = k + 1; - - return j; -} Index: tools/scan-build-py/tests/functional/src/emit-two.c =================================================================== --- tools/scan-build-py/tests/functional/src/emit-two.c +++ /dev/null @@ -1,13 +0,0 @@ - -int bad_guy(int * i) -{ - *i = 9; - return *i; -} - -void bad_guy_test() -{ - int * ptr = 0; - - bad_guy(ptr); -} Index: tools/scan-build-py/tests/functional/src/include/clean-one.h =================================================================== --- tools/scan-build-py/tests/functional/src/include/clean-one.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef CLEAN_ONE_H -#define CLEAN_ONE_H - -int do_nothing_loop(); - -#endif Index: tools/scan-build-py/tests/functional/src/main.c =================================================================== --- tools/scan-build-py/tests/functional/src/main.c +++ /dev/null @@ -1,4 +0,0 @@ -int main() -{ - return 0; -} Index: tools/scan-build-py/tests/lit.cfg =================================================================== --- /dev/null +++ tools/scan-build-py/tests/lit.cfg @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+ +import sys +import os.path +import subprocess +import lit.util + +this_dir = os.path.dirname(__file__) +project_dir = os.path.dirname(this_dir) + +config.name = 'scan-build' + +config.test_format = lit.formats.ShTest() +config.test_exec_root = this_dir +config.test_source_root = this_dir + +config.environment['project_dir'] = project_dir + +config.suffixes = ['.py'] +config.excludes = ['Input', 'tools', 'setup.py'] + +config.substitutions.append(('%{python}', sys.executable)) + +# check pep8 util is available +try: + subprocess.call(['pep8', '--version'], stdout=subprocess.PIPE) +except: + pass +else: + config.available_features.add('pep8') + +# classify os script language +is_windows = sys.platform in {'win32', 'cygwin'} +if is_windows: + config.available_features.add('batch') + config.suffixes.append('.bat') + config.environment['windows'] = 'True' +else: + config.available_features.add('shell') + config.suffixes.append('.sh') + +# check for library preload is available +sys.path.append(project_dir) +from libscanbuild.intercept import is_preload_disabled +if not is_preload_disabled(sys.platform): + config.available_features.add('preload') Index: tools/scan-build-py/tests/run_pep8.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/run_pep8.py @@ -0,0 +1,4 @@ +# REQUIRES: pep8 +# RUN: pep8 --show-source --show-pep8 ../libscanbuild +# RUN: pep8 --show-source --show-pep8 ../libear +# RUN: pep8 --show-source --show-pep8 . Index: tools/scan-build-py/tests/tools/cdb_diff =================================================================== --- /dev/null +++ tools/scan-build-py/tests/tools/cdb_diff @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+ +import argparse +import json +import shlex +import os.path +import sys + + +def diff(lhs, rhs): + left = {smooth(entry): entry for entry in lhs} + right = {smooth(entry): entry for entry in rhs} + for key in left.keys(): + if key not in right: + yield '> {}'.format(left[key]) + for key in right.keys(): + if key not in left: + yield '< {}'.format(right[key]) + + +def smooth(entry): + directory = os.path.normpath(entry['directory']) + source = entry['file'] if os.path.isabs(entry['file']) else \ + os.path.normpath(os.path.join(directory, entry['file'])) + arguments = entry['command'].split() if 'command' in entry else \ + entry['arguments'] + return '-'.join([source[::-1]] + arguments) + + +def main(): + """ Semantically diff two compilation databases. """ + parser = argparse.ArgumentParser() + parser.add_argument('left', type=argparse.FileType('r')) + parser.add_argument('right', type=argparse.FileType('r')) + args = parser.parse_args() + # files are open, parse the json content + lhs = json.load(args.left) + rhs = json.load(args.right) + # run the diff and print the result + count = 0 + for result in diff(lhs, rhs): + print(result) + count += 1 + return count + + +sys.exit(main()) Index: tools/scan-build-py/tests/tools/cdb_expect =================================================================== --- /dev/null +++ tools/scan-build-py/tests/tools/cdb_expect @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. + +import os +import os.path +import argparse +import json +import sys + + +def main(): + """ append entry to a compilation database. 
""" + parser = argparse.ArgumentParser() + parser.add_argument('--cdb', required=True) + parser.add_argument('--command', required=True) + parser.add_argument('--file', required=True) + args = parser.parse_args() + # read existing content from target file + entries = [] + if os.path.exists(args.cdb): + with open(args.cdb, 'r') as handle: + entries = json.load(handle) + # update with the current invocation + current = { + 'directory': os.getcwd(), + 'command': args.command, + 'file': args.file + } + entries.append(current) + # write the result back + with open(args.cdb, 'w') as handle: + json.dump(list(entries), handle, sort_keys=True, indent=4) + return 0 + + +sys.exit(main()) Index: tools/scan-build-py/tests/unit/__init__.py =================================================================== --- tools/scan-build-py/tests/unit/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -from . import test_libear -from . import test_compilation -from . import test_clang -from . import test_runner -from . import test_report -from . import test_analyze -from . import test_intercept -from . 
import test_shell - - -def load_tests(loader, suite, _): - suite.addTests(loader.loadTestsFromModule(test_libear)) - suite.addTests(loader.loadTestsFromModule(test_compilation)) - suite.addTests(loader.loadTestsFromModule(test_clang)) - suite.addTests(loader.loadTestsFromModule(test_runner)) - suite.addTests(loader.loadTestsFromModule(test_report)) - suite.addTests(loader.loadTestsFromModule(test_analyze)) - suite.addTests(loader.loadTestsFromModule(test_intercept)) - suite.addTests(loader.loadTestsFromModule(test_shell)) - return suite Index: tools/scan-build-py/tests/unit/lit.local.cfg =================================================================== --- /dev/null +++ tools/scan-build-py/tests/unit/lit.local.cfg @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. + +import os.path +import lit.util + +# test_source_root: The root path where tests are located. +this_dir = os.path.dirname(__file__) +parent_dir = os.path.dirname(this_dir) +project_dir = os.path.dirname(parent_dir) + + +config.environment['PYTHONPATH'] = project_dir Index: tools/scan-build-py/tests/unit/test_analyze.py =================================================================== --- tools/scan-build-py/tests/unit/test_analyze.py +++ tools/scan-build-py/tests/unit/test_analyze.py @@ -3,5 +3,343 @@ # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. 
+# +# RUN: %{python} %s +import libear import libscanbuild.analyze as sut +import unittest +import os +import os.path +import glob +import platform + +IS_WINDOWS = os.getenv('windows') + + +class Spy(object): + def __init__(self): + self.arg = None + self.success = 0 + + def call(self, params): + self.arg = params + return self.success + + +class FilteringFlagsTest(unittest.TestCase): + + @staticmethod + def classify_parameters(flags): + spy = Spy() + opts = {'flags': flags} + sut.classify_parameters(opts, spy.call) + return spy.arg + + def assertLanguage(self, expected, flags): + self.assertEqual( + expected, + FilteringFlagsTest.classify_parameters(flags)['language']) + + def test_language_captured(self): + self.assertLanguage(None, []) + self.assertLanguage('c', ['-x', 'c']) + self.assertLanguage('cpp', ['-x', 'cpp']) + + def assertArch(self, expected, flags): + self.assertEqual( + expected, + FilteringFlagsTest.classify_parameters(flags)['arch_list']) + + def test_arch(self): + self.assertArch([], []) + self.assertArch(['mips'], ['-arch', 'mips']) + self.assertArch(['mips', 'i386'], ['-arch', 'mips', '-arch', 'i386']) + + def assertFlagsChanged(self, expected, flags): + self.assertEqual( + expected, + FilteringFlagsTest.classify_parameters(flags)['flags']) + + def assertFlagsUnchanged(self, flags): + self.assertFlagsChanged(flags, flags) + + def assertFlagsFiltered(self, flags): + self.assertFlagsChanged([], flags) + + def test_optimalizations_pass(self): + self.assertFlagsUnchanged(['-O']) + self.assertFlagsUnchanged(['-O1']) + self.assertFlagsUnchanged(['-Os']) + self.assertFlagsUnchanged(['-O2']) + self.assertFlagsUnchanged(['-O3']) + + def test_include_pass(self): + self.assertFlagsUnchanged([]) + self.assertFlagsUnchanged(['-include', '/usr/local/include']) + self.assertFlagsUnchanged(['-I.']) + self.assertFlagsUnchanged(['-I', '.']) + self.assertFlagsUnchanged(['-I/usr/local/include']) + self.assertFlagsUnchanged(['-I', '/usr/local/include']) + 
self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include']) + self.assertFlagsUnchanged(['-isystem', '/path']) + self.assertFlagsUnchanged(['-isystem=/path']) + + def test_define_pass(self): + self.assertFlagsUnchanged(['-DNDEBUG']) + self.assertFlagsUnchanged(['-UNDEBUG']) + self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2']) + self.assertFlagsUnchanged(['-Dvar="val ues"']) + + def test_output_filtered(self): + self.assertFlagsFiltered(['-o', 'source.o']) + + def test_some_warning_filtered(self): + self.assertFlagsFiltered(['-Wall']) + self.assertFlagsFiltered(['-Wnoexcept']) + self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef']) + self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused']) + + def test_compile_only_flags_pass(self): + self.assertFlagsUnchanged(['-std=C99']) + self.assertFlagsUnchanged(['-nostdinc']) + self.assertFlagsUnchanged(['-isystem', '/image/debian']) + self.assertFlagsUnchanged(['-iprefix', '/usr/local']) + self.assertFlagsUnchanged(['-iquote=me']) + self.assertFlagsUnchanged(['-iquote', 'me']) + + def test_compile_and_link_flags_pass(self): + self.assertFlagsUnchanged(['-fsinged-char']) + self.assertFlagsUnchanged(['-fPIC']) + self.assertFlagsUnchanged(['-stdlib=libc++']) + self.assertFlagsUnchanged(['--sysroot', '/']) + self.assertFlagsUnchanged(['-isysroot', '/']) + + def test_some_flags_filtered(self): + self.assertFlagsFiltered(['-g']) + self.assertFlagsFiltered(['-fsyntax-only']) + self.assertFlagsFiltered(['-save-temps']) + self.assertFlagsFiltered(['-init', 'my_init']) + self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c']) + + +class RunAnalyzerTest(unittest.TestCase): + + @staticmethod + def run_analyzer(content, failures_report): + with libear.temporary_directory() as tmpdir: + filename = os.path.join(tmpdir, 'test.cpp') + with open(filename, 'w') as handle: + handle.write(content) + + opts = { + 'clang': 'clang', + 'directory': os.getcwd(), + 'flags': [], + 'direct_args': [], + 'source': filename, + 
'output_dir': tmpdir, + 'output_format': 'plist', + 'output_failures': failures_report + } + spy = Spy() + result = sut.run_analyzer(opts, spy.call) + return result, spy.arg + + def test_run_analyzer(self): + content = "int div(int n, int d) { return n / d; }" + (result, fwds) = RunAnalyzerTest.run_analyzer(content, False) + self.assertEqual(None, fwds) + self.assertEqual(0, result['exit_code']) + + def test_run_analyzer_crash(self): + content = "int div(int n, int d) { return n / d }" + (result, fwds) = RunAnalyzerTest.run_analyzer(content, False) + self.assertEqual(None, fwds) + self.assertEqual(1, result['exit_code']) + + def test_run_analyzer_crash_and_forwarded(self): + content = "int div(int n, int d) { return n / d }" + (_, fwds) = RunAnalyzerTest.run_analyzer(content, True) + self.assertEqual(1, fwds['exit_code']) + self.assertTrue(len(fwds['error_output']) > 0) + + +class ReportFailureTest(unittest.TestCase): + + def assertUnderFailures(self, path): + self.assertEqual('failures', os.path.basename(os.path.dirname(path))) + + def test_report_failure_create_files(self): + with libear.temporary_directory() as tmp_dir: + # create input file + filename = os.path.join(tmp_dir, 'test.c') + with open(filename, 'w') as handle: + handle.write('int main() { return 0') + uname_msg = ' '.join(platform.uname()).strip() + error_msg = 'this is my error output' + # execute test + opts = { + 'clang': 'clang', + 'directory': os.getcwd(), + 'flags': [], + 'source': filename, + 'output_dir': tmp_dir, + 'language': 'c', + 'error_output': error_msg, + 'exit_code': 13 + } + sut.report_failure(opts) + # find the info file + pp_files = glob.glob(os.path.join(tmp_dir, 'failures', '*.i')) + self.assertIsNot(pp_files, []) + pp_file = pp_files[0] + # info file generated and content dumped + info_file = pp_file + '.info.txt' + self.assertTrue(os.path.exists(info_file)) + with open(info_file) as info_handler: + lines = [line.strip() for line in info_handler.readlines() if + line.strip()] 
+ self.assertEqual('Other Error', lines[1]) + self.assertEqual(uname_msg, lines[3]) + # error file generated and content dumped + error_file = pp_file + '.stderr.txt' + self.assertTrue(os.path.exists(error_file)) + with open(error_file) as error_handle: + self.assertEqual([error_msg], error_handle.readlines()) + + +class AnalyzerTest(unittest.TestCase): + + def test_nodebug_macros_appended(self): + def test(flags): + spy = Spy() + opts = {'flags': flags, 'force_debug': True} + self.assertEqual(spy.success, + sut.filter_debug_flags(opts, spy.call)) + return spy.arg['flags'] + + self.assertEqual(['-UNDEBUG'], test([])) + self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG'])) + self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething'])) + + def test_set_language_fall_through(self): + def language(expected, input): + spy = Spy() + input.update({'compiler': 'c', 'source': 'test.c'}) + self.assertEqual(spy.success, sut.language_check(input, spy.call)) + self.assertEqual(expected, spy.arg['language']) + + language('c', {'language': 'c', 'flags': []}) + language('c++', {'language': 'c++', 'flags': []}) + + def test_set_language_stops_on_not_supported(self): + spy = Spy() + input = { + 'compiler': 'c', + 'flags': [], + 'source': 'test.java', + 'language': 'java' + } + self.assertIsNone(sut.language_check(input, spy.call)) + self.assertIsNone(spy.arg) + + def test_set_language_sets_flags(self): + def flags(expected, input): + spy = Spy() + input.update({'compiler': 'c', 'source': 'test.c'}) + self.assertEqual(spy.success, sut.language_check(input, spy.call)) + self.assertEqual(expected, spy.arg['flags']) + + flags(['-x', 'c'], {'language': 'c', 'flags': []}) + flags(['-x', 'c++'], {'language': 'c++', 'flags': []}) + + def test_set_language_from_filename(self): + def language(expected, input): + spy = Spy() + input.update({'language': None, 'flags': []}) + self.assertEqual(spy.success, sut.language_check(input, spy.call)) + self.assertEqual(expected, 
spy.arg['language']) + + language('c', {'source': 'file.c', 'compiler': 'c'}) + language('c++', {'source': 'file.c', 'compiler': 'c++'}) + language('c++', {'source': 'file.cxx', 'compiler': 'c'}) + language('c++', {'source': 'file.cxx', 'compiler': 'c++'}) + language('c++', {'source': 'file.cpp', 'compiler': 'c++'}) + language('c-cpp-output', {'source': 'file.i', 'compiler': 'c'}) + language('c++-cpp-output', {'source': 'file.i', 'compiler': 'c++'}) + + def test_arch_loop_sets_flags(self): + def flags(archs): + spy = Spy() + input = {'flags': [], 'arch_list': archs} + sut.arch_check(input, spy.call) + return spy.arg['flags'] + + self.assertEqual([], flags([])) + self.assertEqual(['-arch', 'i386'], flags(['i386'])) + self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc'])) + self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc'])) + + def test_arch_loop_stops_on_not_supported(self): + def stop(archs): + spy = Spy() + input = {'flags': [], 'arch_list': archs} + self.assertIsNone(sut.arch_check(input, spy.call)) + self.assertIsNone(spy.arg) + + stop(['ppc']) + stop(['ppc64']) + + +@sut.require([]) +def method_without_expecteds(opts): + return 0 + + +@sut.require(['this', 'that']) +def method_with_expecteds(opts): + return 0 + + +@sut.require([]) +def method_exception_from_inside(opts): + raise Exception('here is one') + + +class RequireDecoratorTest(unittest.TestCase): + + def test_method_without_expecteds(self): + self.assertEqual(method_without_expecteds(dict()), 0) + self.assertEqual(method_without_expecteds({}), 0) + self.assertEqual(method_without_expecteds({'this': 2}), 0) + self.assertEqual(method_without_expecteds({'that': 3}), 0) + + def test_method_with_expecteds(self): + self.assertRaises(AssertionError, method_with_expecteds, dict()) + self.assertRaises(AssertionError, method_with_expecteds, {}) + self.assertRaises(AssertionError, method_with_expecteds, {'this': 2}) + self.assertRaises(AssertionError, method_with_expecteds, {'that': 3}) + 
self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0) + + def test_method_exception_not_caught(self): + self.assertRaises(Exception, method_exception_from_inside, dict()) + + +class ReportDirectoryTest(unittest.TestCase): + + # Test that successive report directory names ascend in lexicographic + # order. This is required so that report directories from two runs of + # scan-build can be easily matched up to compare results. + @unittest.skipIf(IS_WINDOWS, 'windows has low resolution timer') + def test_directory_name_comparison(self): + with libear.temporary_directory() as tmp_dir, \ + sut.report_directory(tmp_dir, False) as report_dir1, \ + sut.report_directory(tmp_dir, False) as report_dir2, \ + sut.report_directory(tmp_dir, False) as report_dir3: + self.assertLess(report_dir1, report_dir2) + self.assertLess(report_dir2, report_dir3) + + +if __name__ == '__main__': + unittest.main() Index: tools/scan-build-py/tests/unit/test_clang.py =================================================================== --- tools/scan-build-py/tests/unit/test_clang.py +++ tools/scan-build-py/tests/unit/test_clang.py @@ -3,6 +3,8 @@ # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. 
+# +# RUN: %{python} %s import libear import libscanbuild.clang as sut @@ -22,7 +24,7 @@ class ClangGetArgumentsTest(unittest.TestCase): def test_get_clang_arguments(self): - with libear.TemporaryDirectory() as tmpdir: + with libear.temporary_directory() as tmpdir: filename = os.path.join(tmpdir, 'test.c') with open(filename, 'w') as handle: handle.write('') @@ -92,3 +94,7 @@ self.assertEqual('Checker One description', result.get('checker.one')) self.assertTrue('checker.two' in result) self.assertEqual('Checker Two description', result.get('checker.two')) + + +if __name__ == '__main__': + unittest.main() Index: tools/scan-build-py/tests/unit/test_compilation.py =================================================================== --- tools/scan-build-py/tests/unit/test_compilation.py +++ tools/scan-build-py/tests/unit/test_compilation.py @@ -3,6 +3,8 @@ # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. +# +# RUN: %{python} %s import libscanbuild.compilation as sut import unittest @@ -10,88 +12,156 @@ class CompilerTest(unittest.TestCase): - def test_is_compiler_call(self): - self.assertIsNotNone(sut.compiler_language(['clang'])) - self.assertIsNotNone(sut.compiler_language(['clang-3.6'])) - self.assertIsNotNone(sut.compiler_language(['clang++'])) - self.assertIsNotNone(sut.compiler_language(['clang++-3.5.1'])) - self.assertIsNotNone(sut.compiler_language(['cc'])) - self.assertIsNotNone(sut.compiler_language(['c++'])) - self.assertIsNotNone(sut.compiler_language(['gcc'])) - self.assertIsNotNone(sut.compiler_language(['g++'])) - self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/gcc'])) - self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/g++'])) - self.assertIsNotNone(sut.compiler_language(['/usr/local/bin/clang'])) - self.assertIsNotNone( - sut.compiler_language(['armv7_neno-linux-gnueabi-g++'])) - - self.assertIsNone(sut.compiler_language([])) - 
self.assertIsNone(sut.compiler_language([''])) - self.assertIsNone(sut.compiler_language(['ld'])) - self.assertIsNone(sut.compiler_language(['as'])) - self.assertIsNone(sut.compiler_language(['/usr/local/bin/compiler'])) + def assert_c_compiler(self, command, cc='nope', cxx='nope++'): + value = sut.Compilation._split_compiler(command, cc, cxx) + self.assertIsNotNone(value) + self.assertEqual(value[0], 'c') + + def assert_cxx_compiler(self, command, cc='nope', cxx='nope++'): + value = sut.Compilation._split_compiler(command, cc, cxx) + self.assertIsNotNone(value) + self.assertEqual(value[0], 'c++') + + def assert_not_compiler(self, command): + value = sut.Compilation._split_compiler(command, 'nope', 'nope') + self.assertIsNone(value) + + def test_compiler_call(self): + self.assert_c_compiler(['cc']) + self.assert_cxx_compiler(['CC']) + self.assert_cxx_compiler(['c++']) + self.assert_cxx_compiler(['cxx']) + + def test_clang_compiler_call(self): + self.assert_c_compiler(['clang']) + self.assert_c_compiler(['clang-3.6']) + self.assert_cxx_compiler(['clang++']) + self.assert_cxx_compiler(['clang++-3.5.1']) + + def test_gcc_compiler_call(self): + self.assert_c_compiler(['gcc']) + self.assert_cxx_compiler(['g++']) + + def test_intel_compiler_call(self): + self.assert_c_compiler(['icc']) + self.assert_cxx_compiler(['icpc']) + + def test_aix_compiler_call(self): + self.assert_c_compiler(['xlc']) + self.assert_cxx_compiler(['xlc++']) + self.assert_cxx_compiler(['xlC']) + self.assert_c_compiler(['gxlc']) + self.assert_cxx_compiler(['gxlc++']) + + def test_open_mpi_compiler_call(self): + self.assert_c_compiler(['mpicc']) + self.assert_cxx_compiler(['mpiCC']) + self.assert_cxx_compiler(['mpicxx']) + self.assert_cxx_compiler(['mpic++']) + + def test_compiler_call_with_path(self): + self.assert_c_compiler(['/usr/local/bin/gcc']) + self.assert_cxx_compiler(['/usr/local/bin/g++']) + self.assert_c_compiler(['/usr/local/bin/clang']) + + def test_cross_compiler_call(self): + 
self.assert_cxx_compiler(['armv7_neno-linux-gnueabi-g++']) + + def test_compiler_wrapper_call(self): + self.assert_c_compiler(['distcc']) + self.assert_c_compiler(['distcc', 'cc']) + self.assert_cxx_compiler(['distcc', 'c++']) + self.assert_c_compiler(['ccache']) + self.assert_c_compiler(['ccache', 'cc']) + self.assert_cxx_compiler(['ccache', 'c++']) + + def test_non_compiler_call(self): + self.assert_not_compiler([]) + self.assert_not_compiler(['']) + self.assert_not_compiler(['ld']) + self.assert_not_compiler(['as']) + self.assert_not_compiler(['/usr/local/bin/compiler']) + + def test_specific_compiler_call(self): + self.assert_c_compiler(['nope'], cc='nope') + self.assert_c_compiler(['./nope'], cc='nope') + self.assert_c_compiler(['/path/nope'], cc='nope') + self.assert_cxx_compiler(['nope++'], cxx='nope++') + self.assert_cxx_compiler(['./nope++'], cxx='nope++') + self.assert_cxx_compiler(['/path/nope++'], cxx='nope++') + + def assert_arguments_equal(self, expected, command): + value = sut.Compilation._split_compiler(command, 'nope', 'nope') + self.assertIsNotNone(value) + self.assertEqual(expected, value[1]) + + def test_argument_split(self): + arguments = ['-c', 'file.c'] + self.assert_arguments_equal(arguments, ['distcc'] + arguments) + self.assert_arguments_equal(arguments, ['distcc', 'cc'] + arguments) + self.assert_arguments_equal(arguments, ['distcc', 'c++'] + arguments) + self.assert_arguments_equal(arguments, ['ccache'] + arguments) + self.assert_arguments_equal(arguments, ['ccache', 'cc'] + arguments) + self.assert_arguments_equal(arguments, ['ccache', 'c++'] + arguments) class SplitTest(unittest.TestCase): - def test_detect_cxx_from_compiler_name(self): - def test(cmd): - result = sut.split_command([cmd, '-c', 'src.c']) - self.assertIsNotNone(result, "wrong input for test") - return result.compiler == 'c++' - - self.assertFalse(test('cc')) - self.assertFalse(test('gcc')) - self.assertFalse(test('clang')) + def assert_compilation(self, command): + 
result = sut.Compilation._split_command(command, 'nope', 'nope') + self.assertIsNotNone(result) - self.assertTrue(test('c++')) - self.assertTrue(test('g++')) - self.assertTrue(test('g++-5.3.1')) - self.assertTrue(test('clang++')) - self.assertTrue(test('clang++-3.7.1')) - self.assertTrue(test('armv7_neno-linux-gnueabi-g++')) + def assert_non_compilation(self, command): + result = sut.Compilation._split_command(command, 'nope', 'nope') + self.assertIsNone(result) def test_action(self): - self.assertIsNotNone(sut.split_command(['clang', 'source.c'])) - self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c'])) - self.assertIsNotNone(sut.split_command(['clang', '-c', 'source.c', - '-MF', 'a.d'])) + self.assert_compilation(['clang', 'source.c']) + self.assert_compilation(['clang', '-c', 'source.c']) + self.assert_compilation(['clang', '-c', 'source.c', '-MF', 'a.d']) + + self.assert_non_compilation(['clang', '-E', 'source.c']) + self.assert_non_compilation(['clang', '-c', '-E', 'source.c']) + self.assert_non_compilation(['clang', '-c', '-M', 'source.c']) + self.assert_non_compilation(['clang', '-c', '-MM', 'source.c']) - self.assertIsNone(sut.split_command(['clang', '-E', 'source.c'])) - self.assertIsNone(sut.split_command(['clang', '-c', '-E', 'source.c'])) - self.assertIsNone(sut.split_command(['clang', '-c', '-M', 'source.c'])) - self.assertIsNone( - sut.split_command(['clang', '-c', '-MM', 'source.c'])) + def assert_source_files(self, expected, command): + result = sut.Compilation._split_command(command, 'nope', 'nope') + self.assertIsNotNone(result) + self.assertEqual(expected, result.files) def test_source_file(self): - def test(expected, cmd): - self.assertEqual(expected, sut.split_command(cmd).files) - - test(['src.c'], ['clang', 'src.c']) - test(['src.c'], ['clang', '-c', 'src.c']) - test(['src.C'], ['clang', '-x', 'c', 'src.C']) - test(['src.cpp'], ['clang++', '-c', 'src.cpp']) - test(['s1.c', 's2.c'], ['clang', '-c', 's1.c', 's2.c']) - 
test(['s1.c', 's2.c'], ['cc', 's1.c', 's2.c', '-ldep', '-o', 'a.out']) - test(['src.c'], ['clang', '-c', '-I', './include', 'src.c']) - test(['src.c'], ['clang', '-c', '-I', '/opt/me/include', 'src.c']) - test(['src.c'], ['clang', '-c', '-D', 'config=file.c', 'src.c']) - - self.assertIsNone( - sut.split_command(['cc', 'this.o', 'that.o', '-o', 'a.out'])) - self.assertIsNone( - sut.split_command(['cc', 'this.o', '-lthat', '-o', 'a.out'])) + self.assert_source_files(['src.c'], ['clang', 'src.c']) + self.assert_source_files(['src.c'], ['clang', '-c', 'src.c']) + self.assert_source_files(['src.C'], ['clang', '-x', 'c', 'src.C']) + self.assert_source_files(['src.cpp'], ['clang++', '-c', 'src.cpp']) + self.assert_source_files(['s1.c', 's2.c'], + ['clang', '-c', 's1.c', 's2.c']) + self.assert_source_files(['s1.c', 's2.c'], + ['cc', 's1.c', 's2.c', '-ldp', '-o', 'a.out']) + self.assert_source_files(['src.c'], + ['clang', '-c', '-I', './include', 'src.c']) + self.assert_source_files(['src.c'], + ['clang', '-c', '-I', '/opt/inc', 'src.c']) + self.assert_source_files(['src.c'], + ['clang', '-c', '-Dconfig=file.c', 'src.c']) + + self.assert_non_compilation(['cc', 'this.o', 'that.o', '-o', 'a.out']) + self.assert_non_compilation(['cc', 'this.o', '-lthat', '-o', 'a.out']) + + def assert_flags(self, expected, flags): + command = ['clang', '-c', 'src.c'] + flags + result = sut.Compilation._split_command(command, 'nope', 'nope') + self.assertIsNotNone(result) + self.assertEqual(expected, result.flags) def test_filter_flags(self): - def test(expected, flags): - command = ['clang', '-c', 'src.c'] + flags - self.assertEqual(expected, sut.split_command(command).flags) def same(expected): - test(expected, expected) + self.assert_flags(expected, expected) def filtered(flags): - test([], flags) + self.assert_flags([], flags) same([]) same(['-I', '/opt/me/include', '-DNDEBUG', '-ULIMITS']) @@ -108,15 +178,39 @@ class SourceClassifierTest(unittest.TestCase): + def assert_non_source(self, 
filename): + result = sut.classify_source(filename) + self.assertIsNone(result) + + def assert_c_source(self, filename, force): + result = sut.classify_source(filename, force) + self.assertEqual('c', result) + + def assert_cxx_source(self, filename, force): + result = sut.classify_source(filename, force) + self.assertEqual('c++', result) + def test_sources(self): - self.assertIsNone(sut.classify_source('file.o')) - self.assertIsNone(sut.classify_source('file.exe')) - self.assertIsNone(sut.classify_source('/path/file.o')) - self.assertIsNone(sut.classify_source('clang')) - - self.assertEqual('c', sut.classify_source('file.c')) - self.assertEqual('c', sut.classify_source('./file.c')) - self.assertEqual('c', sut.classify_source('/path/file.c')) - self.assertEqual('c++', sut.classify_source('file.c', False)) - self.assertEqual('c++', sut.classify_source('./file.c', False)) - self.assertEqual('c++', sut.classify_source('/path/file.c', False)) + self.assert_non_source('file.o') + self.assert_non_source('file.exe') + self.assert_non_source('/path/file.o') + self.assert_non_source('clang') + + self.assert_c_source('file.c', True) + self.assert_cxx_source('file.c', False) + + self.assert_cxx_source('file.cxx', True) + self.assert_cxx_source('file.cxx', False) + self.assert_cxx_source('file.c++', True) + self.assert_cxx_source('file.c++', False) + self.assert_cxx_source('file.cpp', True) + self.assert_cxx_source('file.cpp', False) + + self.assert_c_source('/path/file.c', True) + self.assert_c_source('./path/file.c', True) + self.assert_c_source('../path/file.c', True) + self.assert_c_source('/file.c', True) + self.assert_c_source('./file.c', True) + +if __name__ == '__main__': + unittest.main() Index: tools/scan-build-py/tests/unit/test_intercept.py =================================================================== --- tools/scan-build-py/tests/unit/test_intercept.py +++ tools/scan-build-py/tests/unit/test_intercept.py @@ -3,39 +3,34 @@ # # This file is distributed under 
the University of Illinois Open Source # License. See LICENSE.TXT for details. +# +# RUN: %{python} %s -import libear -import libscanbuild.intercept as sut -import unittest +import os import os.path +import unittest +import libear +import libscanbuild.intercept as sut +from libscanbuild import Execution -class InterceptUtilTest(unittest.TestCase): - - def test_format_entry_filters_action(self): - def test(command): - trace = {'command': command, 'directory': '/opt/src/project'} - return list(sut.format_entry(trace)) - - self.assertTrue(test(['cc', '-c', 'file.c', '-o', 'file.o'])) - self.assertFalse(test(['cc', '-E', 'file.c'])) - self.assertFalse(test(['cc', '-MM', 'file.c'])) - self.assertFalse(test(['cc', 'this.o', 'that.o', '-o', 'a.out'])) - - def test_format_entry_normalize_filename(self): - parent = os.path.join(os.sep, 'home', 'me') - current = os.path.join(parent, 'project') +IS_WINDOWS = os.getenv('windows') - def test(filename): - trace = {'directory': current, 'command': ['cc', '-c', filename]} - return list(sut.format_entry(trace))[0]['file'] - self.assertEqual(os.path.join(current, 'file.c'), test('file.c')) - self.assertEqual(os.path.join(current, 'file.c'), test('./file.c')) - self.assertEqual(os.path.join(parent, 'file.c'), test('../file.c')) - self.assertEqual(os.path.join(current, 'file.c'), - test(os.path.join(current, 'file.c'))) +class InterceptUtilTest(unittest.TestCase): + def test_read_write_exec_trace(self): + input_one = Execution( + pid=123, + cwd='/path/to/here', + cmd=['cc', '-c', 'this.c']) + with libear.temporary_directory() as tmp_dir: + temp_file = os.path.join(tmp_dir, 'single_report.cmd') + sut.write_exec_trace(temp_file, input_one) + result = sut.parse_exec_trace(temp_file) + self.assertEqual(input_one, result) + + @unittest.skipIf(IS_WINDOWS, 'this code is not running on windows') def test_sip(self): def create_status_report(filename, message): content = """#!/usr/bin/env sh @@ -45,9 +40,9 @@ echo 'sa-la-la-la' echo 'la-la-la' 
""".format(message) - lines = [line.strip() for line in content.split('\n')] + lines = [line.strip() for line in content.split(os.linesep)] with open(filename, 'w') as handle: - handle.write('\n'.join(lines)) + handle.write(os.linesep.join(lines)) handle.close() os.chmod(filename, 0x1ff) @@ -56,43 +51,32 @@ message = 'System Integrity Protection status: {0}'.format(status) return create_status_report(filename, message) - def create_sestatus(dest_dir, status): - filename = os.path.join(dest_dir, 'sestatus') - message = 'SELinux status:\t{0}'.format(status) - return create_status_report(filename, message) - - ENABLED = 'enabled' - DISABLED = 'disabled' - - OSX = 'darwin' - LINUX = 'linux' + enabled = 'enabled' + disabled = 'disabled' + osx = 'darwin' - with libear.TemporaryDirectory() as tmpdir: + saved = os.environ['PATH'] + with libear.temporary_directory() as tmp_dir: try: - saved = os.environ['PATH'] - os.environ['PATH'] = tmpdir + ':' + saved + os.environ['PATH'] = os.pathsep.join([tmp_dir, saved]) - create_csrutil(tmpdir, ENABLED) - self.assertTrue(sut.is_preload_disabled(OSX)) + create_csrutil(tmp_dir, enabled) + self.assertTrue(sut.is_preload_disabled(osx)) - create_csrutil(tmpdir, DISABLED) - self.assertFalse(sut.is_preload_disabled(OSX)) - - create_sestatus(tmpdir, ENABLED) - self.assertTrue(sut.is_preload_disabled(LINUX)) - - create_sestatus(tmpdir, DISABLED) - self.assertFalse(sut.is_preload_disabled(LINUX)) + create_csrutil(tmp_dir, disabled) + self.assertFalse(sut.is_preload_disabled(osx)) finally: os.environ['PATH'] = saved try: - saved = os.environ['PATH'] os.environ['PATH'] = '' # shall be false when it's not in the path - self.assertFalse(sut.is_preload_disabled(OSX)) - self.assertFalse(sut.is_preload_disabled(LINUX)) + self.assertFalse(sut.is_preload_disabled(osx)) self.assertFalse(sut.is_preload_disabled('unix')) finally: os.environ['PATH'] = saved + + +if __name__ == '__main__': + unittest.main() Index: 
tools/scan-build-py/tests/unit/test_libear.py =================================================================== --- tools/scan-build-py/tests/unit/test_libear.py +++ tools/scan-build-py/tests/unit/test_libear.py @@ -3,6 +3,8 @@ # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. +# +# RUN: %{python} %s import libear as sut import unittest @@ -11,20 +13,24 @@ class TemporaryDirectoryTest(unittest.TestCase): def test_creates_directory(self): - dirname = None - with sut.TemporaryDirectory() as tmpdir: + dir_name = None + with sut.temporary_directory() as tmpdir: self.assertTrue(os.path.isdir(tmpdir)) - dirname = tmpdir - self.assertIsNotNone(dirname) - self.assertFalse(os.path.exists(dirname)) + dir_name = tmpdir + self.assertIsNotNone(dir_name) + self.assertFalse(os.path.exists(dir_name)) def test_removes_directory_when_exception(self): - dirname = None + dir_name = None try: - with sut.TemporaryDirectory() as tmpdir: + with sut.temporary_directory() as tmpdir: self.assertTrue(os.path.isdir(tmpdir)) - dirname = tmpdir + dir_name = tmpdir raise RuntimeError('message') except: - self.assertIsNotNone(dirname) - self.assertFalse(os.path.exists(dirname)) + self.assertIsNotNone(dir_name) + self.assertFalse(os.path.exists(dir_name)) + + +if __name__ == '__main__': + unittest.main() Index: tools/scan-build-py/tests/unit/test_libscanbuild.py =================================================================== --- /dev/null +++ tools/scan-build-py/tests/unit/test_libscanbuild.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+# +# RUN: %{python} %s + +import libscanbuild as sut +import unittest + + +class ShellSplitTest(unittest.TestCase): + + def test_regular_commands(self): + self.assertEqual([], sut.shell_split("")) + self.assertEqual(['clang', '-c', 'file.c'], + sut.shell_split('clang -c file.c')) + self.assertEqual(['clang', '-c', 'file.c'], + sut.shell_split('clang -c file.c')) + self.assertEqual(['clang', '-c', 'file.c'], + sut.shell_split('clang -c\tfile.c')) + + def test_quoted_commands(self): + self.assertEqual(['clang', '-c', 'file.c'], + sut.shell_split('"clang" -c "file.c"')) + self.assertEqual(['clang', '-c', 'file.c'], + sut.shell_split("'clang' -c 'file.c'")) + + def test_shell_escaping(self): + self.assertEqual(['clang', '-c', 'file.c', '-Dv=space value'], + sut.shell_split('clang -c file.c -Dv="space value"')) + self.assertEqual(['clang', '-c', 'file.c', '-Dv=\"quote'], + sut.shell_split('clang -c file.c -Dv=\\\"quote')) + self.assertEqual(['clang', '-c', 'file.c', '-Dv=(word)'], + sut.shell_split('clang -c file.c -Dv=\(word\)')) + + +if __name__ == '__main__': + unittest.main() Index: tools/scan-build-py/tests/unit/test_report.py =================================================================== --- tools/scan-build-py/tests/unit/test_report.py +++ tools/scan-build-py/tests/unit/test_report.py @@ -3,28 +3,35 @@ # # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. 
+# +# RUN: %{python} %s import libear import libscanbuild.report as sut import unittest import os import os.path +import glob + +IS_WINDOWS = os.getenv('windows') def run_bug_parse(content): - with libear.TemporaryDirectory() as tmpdir: - file_name = os.path.join(tmpdir, 'test.html') + with libear.temporary_directory() as tmp_dir: + file_name = os.path.join(tmp_dir, 'test.html') with open(file_name, 'w') as handle: - handle.writelines(content) + lines = (line + os.linesep for line in content) + handle.writelines(lines) for bug in sut.parse_bug_html(file_name): return bug -def run_crash_parse(content, preproc): - with libear.TemporaryDirectory() as tmpdir: - file_name = os.path.join(tmpdir, preproc + '.info.txt') +def run_crash_parse(content, prefix): + with libear.temporary_directory() as tmp_dir: + file_name = os.path.join(tmp_dir, prefix + '.info.txt') with open(file_name, 'w') as handle: - handle.writelines(content) + lines = (line + os.linesep for line in content) + handle.writelines(lines) return sut.parse_crash(file_name) @@ -32,17 +39,17 @@ def test_parse_bug(self): content = [ - "some header\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "some tails\n"] + "some header", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "some tails"] result = run_bug_parse(content) self.assertEqual(result['bug_category'], 'Logic error') self.assertEqual(result['bug_path_length'], 4) @@ -60,10 +67,10 @@ def test_parse_crash(self): content = [ - "/some/path/file.c\n", - "Some very serious Error\n", - "bla\n", - "bla-bla\n"] + "/some/path/file.c", + "Some very serious Error", + "bla", + "bla-bla"] result = run_crash_parse(content, 'file.i') self.assertEqual(result['source'], content[0].rstrip()) self.assertEqual(result['problem'], content[1].rstrip()) @@ -75,10 +82,9 @@ 'file.i.stderr.txt') def test_parse_real_crash(self): - import libscanbuild.runner as sut2 - import re - with libear.TemporaryDirectory() as tmpdir: - filename = 
os.path.join(tmpdir, 'test.c') + import libscanbuild.analyze as sut2 + with libear.temporary_directory() as tmp_dir: + filename = os.path.join(tmp_dir, 'test.c') with open(filename, 'w') as handle: handle.write('int main() { return 0') # produce failure report @@ -86,22 +92,17 @@ 'clang': 'clang', 'directory': os.getcwd(), 'flags': [], - 'file': filename, - 'output_dir': tmpdir, + 'source': filename, + 'output_dir': tmp_dir, 'language': 'c', - 'error_type': 'other_error', 'error_output': 'some output', 'exit_code': 13 } sut2.report_failure(opts) # find the info file - pp_file = None - for root, _, files in os.walk(tmpdir): - keys = [os.path.join(root, name) for name in files] - for key in keys: - if re.match(r'^(.*/)+clang(.*)\.i$', key): - pp_file = key - self.assertIsNot(pp_file, None) + pp_files = glob.glob(os.path.join(tmp_dir, 'failures', '*.i')) + self.assertIsNot(pp_files, []) + pp_file = pp_files[0] # read the failure report back result = sut.parse_crash(pp_file + '.info.txt') self.assertEqual(result['source'], filename) @@ -113,49 +114,89 @@ class ReportMethodTest(unittest.TestCase): + @unittest.skipIf(IS_WINDOWS, 'windows has different path patterns') def test_chop(self): self.assertEqual('file', sut.chop('/prefix', '/prefix/file')) self.assertEqual('file', sut.chop('/prefix/', '/prefix/file')) self.assertEqual('lib/file', sut.chop('/prefix/', '/prefix/lib/file')) self.assertEqual('/prefix/file', sut.chop('', '/prefix/file')) + @unittest.skipIf(IS_WINDOWS, 'windows has different path patterns') def test_chop_when_cwd(self): self.assertEqual('../src/file', sut.chop('/cwd', '/src/file')) self.assertEqual('../src/file', sut.chop('/prefix/cwd', '/prefix/src/file')) + @unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns') + def test_chop_on_windows(self): + self.assertEqual('file', sut.chop('c:\\prefix', 'c:\\prefix\\file')) + self.assertEqual('file', sut.chop('c:\\prefix\\', 'c:\\prefix\\file')) + self.assertEqual('lib\\file', + 
sut.chop('c:\\prefix\\', 'c:\\prefix\\lib\\file')) + self.assertEqual('c:\\prefix\\file', sut.chop('', 'c:\\prefix\\file')) + + @unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns') + def test_chop_when_cwd_on_windows(self): + self.assertEqual('..\\src\\file', + sut.chop('c:\\cwd', 'c:\\src\\file')) + self.assertEqual('..\\src\\file', + sut.chop('z:\\prefix\\cwd', 'z:\\prefix\\src\\file')) + class GetPrefixFromCompilationDatabaseTest(unittest.TestCase): + def test_empty(self): + self.assertEqual( + sut.commonprefix([]), '') + + @unittest.skipIf(IS_WINDOWS, 'windows has different path patterns') def test_with_different_filenames(self): self.assertEqual( sut.commonprefix(['/tmp/a.c', '/tmp/b.c']), '/tmp') + @unittest.skipIf(IS_WINDOWS, 'windows has different path patterns') def test_with_different_dirnames(self): self.assertEqual( sut.commonprefix(['/tmp/abs/a.c', '/tmp/ack/b.c']), '/tmp') + @unittest.skipIf(IS_WINDOWS, 'windows has different path patterns') def test_no_common_prefix(self): self.assertEqual( sut.commonprefix(['/tmp/abs/a.c', '/usr/ack/b.c']), '/') + @unittest.skipIf(IS_WINDOWS, 'windows has different path patterns') def test_with_single_file(self): self.assertEqual( sut.commonprefix(['/tmp/a.c']), '/tmp') - def test_empty(self): + @unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns') + def test_with_different_filenames_on_windows(self): self.assertEqual( - sut.commonprefix([]), '') + sut.commonprefix(['c:\\tmp\\a.c', 'c:\\tmp\\b.c']), 'c:\\tmp') + + @unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns') + def test_with_different_dirnames_on_windows(self): + self.assertEqual( + sut.commonprefix(['c:\\tmp\\abs\\a.c', 'c:\\tmp\\ack\\b.c']), + 'c:\\tmp') + + @unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns') + def test_no_common_prefix_on_windows(self): + self.assertEqual( + sut.commonprefix(['z:\\tmp\\abs\\a.c', 'z:\\usr\\ack\\b.c']), + 'z:\\') + + @unittest.skipIf(not IS_WINDOWS, 
'windows has different path patterns') + def test_different_drive_on_windows(self): + self.assertEqual( + sut.commonprefix(['c:\\tmp\\abs\\a.c', 'z:\\usr\\ack\\b.c']), + '') + + @unittest.skipIf(not IS_WINDOWS, 'windows has different path patterns') + def test_with_single_file_on_windows(self): + self.assertEqual( + sut.commonprefix(['z:\\tmp\\a.c']), 'z:\\tmp') + -class ReportDirectoryTest(unittest.TestCase): - - # Test that successive report directory names ascend in lexicographic - # order. This is required so that report directories from two runs of - # scan-build can be easily matched up to compare results. - def test_directory_name_comparison(self): - with libear.TemporaryDirectory() as tmpdir, \ - sut.report_directory(tmpdir, False) as report_dir1, \ - sut.report_directory(tmpdir, False) as report_dir2, \ - sut.report_directory(tmpdir, False) as report_dir3: - self.assertLess(report_dir1, report_dir2) - self.assertLess(report_dir2, report_dir3) +if __name__ == '__main__': + unittest.main() Index: tools/scan-build-py/tests/unit/test_runner.py =================================================================== --- tools/scan-build-py/tests/unit/test_runner.py +++ /dev/null @@ -1,322 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. 
- -import libear -import libscanbuild.runner as sut -import unittest -import re -import os -import os.path - - -class FilteringFlagsTest(unittest.TestCase): - - def test_language_captured(self): - def test(flags): - cmd = ['clang', '-c', 'source.c'] + flags - opts = sut.classify_parameters(cmd) - return opts['language'] - - self.assertEqual(None, test([])) - self.assertEqual('c', test(['-x', 'c'])) - self.assertEqual('cpp', test(['-x', 'cpp'])) - - def test_arch(self): - def test(flags): - cmd = ['clang', '-c', 'source.c'] + flags - opts = sut.classify_parameters(cmd) - return opts['arch_list'] - - self.assertEqual([], test([])) - self.assertEqual(['mips'], test(['-arch', 'mips'])) - self.assertEqual(['mips', 'i386'], - test(['-arch', 'mips', '-arch', 'i386'])) - - def assertFlagsChanged(self, expected, flags): - cmd = ['clang', '-c', 'source.c'] + flags - opts = sut.classify_parameters(cmd) - self.assertEqual(expected, opts['flags']) - - def assertFlagsUnchanged(self, flags): - self.assertFlagsChanged(flags, flags) - - def assertFlagsFiltered(self, flags): - self.assertFlagsChanged([], flags) - - def test_optimalizations_pass(self): - self.assertFlagsUnchanged(['-O']) - self.assertFlagsUnchanged(['-O1']) - self.assertFlagsUnchanged(['-Os']) - self.assertFlagsUnchanged(['-O2']) - self.assertFlagsUnchanged(['-O3']) - - def test_include_pass(self): - self.assertFlagsUnchanged([]) - self.assertFlagsUnchanged(['-include', '/usr/local/include']) - self.assertFlagsUnchanged(['-I.']) - self.assertFlagsUnchanged(['-I', '.']) - self.assertFlagsUnchanged(['-I/usr/local/include']) - self.assertFlagsUnchanged(['-I', '/usr/local/include']) - self.assertFlagsUnchanged(['-I/opt', '-I', '/opt/otp/include']) - self.assertFlagsUnchanged(['-isystem', '/path']) - self.assertFlagsUnchanged(['-isystem=/path']) - - def test_define_pass(self): - self.assertFlagsUnchanged(['-DNDEBUG']) - self.assertFlagsUnchanged(['-UNDEBUG']) - self.assertFlagsUnchanged(['-Dvar1=val1', '-Dvar2=val2']) - 
self.assertFlagsUnchanged(['-Dvar="val ues"']) - - def test_output_filtered(self): - self.assertFlagsFiltered(['-o', 'source.o']) - - def test_some_warning_filtered(self): - self.assertFlagsFiltered(['-Wall']) - self.assertFlagsFiltered(['-Wnoexcept']) - self.assertFlagsFiltered(['-Wreorder', '-Wunused', '-Wundef']) - self.assertFlagsUnchanged(['-Wno-reorder', '-Wno-unused']) - - def test_compile_only_flags_pass(self): - self.assertFlagsUnchanged(['-std=C99']) - self.assertFlagsUnchanged(['-nostdinc']) - self.assertFlagsUnchanged(['-isystem', '/image/debian']) - self.assertFlagsUnchanged(['-iprefix', '/usr/local']) - self.assertFlagsUnchanged(['-iquote=me']) - self.assertFlagsUnchanged(['-iquote', 'me']) - - def test_compile_and_link_flags_pass(self): - self.assertFlagsUnchanged(['-fsinged-char']) - self.assertFlagsUnchanged(['-fPIC']) - self.assertFlagsUnchanged(['-stdlib=libc++']) - self.assertFlagsUnchanged(['--sysroot', '/']) - self.assertFlagsUnchanged(['-isysroot', '/']) - - def test_some_flags_filtered(self): - self.assertFlagsFiltered(['-g']) - self.assertFlagsFiltered(['-fsyntax-only']) - self.assertFlagsFiltered(['-save-temps']) - self.assertFlagsFiltered(['-init', 'my_init']) - self.assertFlagsFiltered(['-sectorder', 'a', 'b', 'c']) - - -class Spy(object): - def __init__(self): - self.arg = None - self.success = 0 - - def call(self, params): - self.arg = params - return self.success - - -class RunAnalyzerTest(unittest.TestCase): - - @staticmethod - def run_analyzer(content, failures_report): - with libear.TemporaryDirectory() as tmpdir: - filename = os.path.join(tmpdir, 'test.cpp') - with open(filename, 'w') as handle: - handle.write(content) - - opts = { - 'clang': 'clang', - 'directory': os.getcwd(), - 'flags': [], - 'direct_args': [], - 'file': filename, - 'output_dir': tmpdir, - 'output_format': 'plist', - 'output_failures': failures_report - } - spy = Spy() - result = sut.run_analyzer(opts, spy.call) - return (result, spy.arg) - - def 
test_run_analyzer(self): - content = "int div(int n, int d) { return n / d; }" - (result, fwds) = RunAnalyzerTest.run_analyzer(content, False) - self.assertEqual(None, fwds) - self.assertEqual(0, result['exit_code']) - - def test_run_analyzer_crash(self): - content = "int div(int n, int d) { return n / d }" - (result, fwds) = RunAnalyzerTest.run_analyzer(content, False) - self.assertEqual(None, fwds) - self.assertEqual(1, result['exit_code']) - - def test_run_analyzer_crash_and_forwarded(self): - content = "int div(int n, int d) { return n / d }" - (_, fwds) = RunAnalyzerTest.run_analyzer(content, True) - self.assertEqual('crash', fwds['error_type']) - self.assertEqual(1, fwds['exit_code']) - self.assertTrue(len(fwds['error_output']) > 0) - - -class ReportFailureTest(unittest.TestCase): - - def assertUnderFailures(self, path): - self.assertEqual('failures', os.path.basename(os.path.dirname(path))) - - def test_report_failure_create_files(self): - with libear.TemporaryDirectory() as tmpdir: - # create input file - filename = os.path.join(tmpdir, 'test.c') - with open(filename, 'w') as handle: - handle.write('int main() { return 0') - uname_msg = ' '.join(os.uname()) + os.linesep - error_msg = 'this is my error output' - # execute test - opts = { - 'clang': 'clang', - 'directory': os.getcwd(), - 'flags': [], - 'file': filename, - 'output_dir': tmpdir, - 'language': 'c', - 'error_type': 'other_error', - 'error_output': error_msg, - 'exit_code': 13 - } - sut.report_failure(opts) - # verify the result - result = dict() - pp_file = None - for root, _, files in os.walk(tmpdir): - keys = [os.path.join(root, name) for name in files] - for key in keys: - with open(key, 'r') as handle: - result[key] = handle.readlines() - if re.match(r'^(.*/)+clang(.*)\.i$', key): - pp_file = key - - # prepocessor file generated - self.assertUnderFailures(pp_file) - # info file generated and content dumped - info_file = pp_file + '.info.txt' - self.assertTrue(info_file in result) - 
self.assertEqual('Other Error\n', result[info_file][1]) - self.assertEqual(uname_msg, result[info_file][3]) - # error file generated and content dumped - error_file = pp_file + '.stderr.txt' - self.assertTrue(error_file in result) - self.assertEqual([error_msg], result[error_file]) - - -class AnalyzerTest(unittest.TestCase): - - def test_nodebug_macros_appended(self): - def test(flags): - spy = Spy() - opts = {'flags': flags, 'force_debug': True} - self.assertEqual(spy.success, - sut.filter_debug_flags(opts, spy.call)) - return spy.arg['flags'] - - self.assertEqual(['-UNDEBUG'], test([])) - self.assertEqual(['-DNDEBUG', '-UNDEBUG'], test(['-DNDEBUG'])) - self.assertEqual(['-DSomething', '-UNDEBUG'], test(['-DSomething'])) - - def test_set_language_fall_through(self): - def language(expected, input): - spy = Spy() - input.update({'compiler': 'c', 'file': 'test.c'}) - self.assertEqual(spy.success, sut.language_check(input, spy.call)) - self.assertEqual(expected, spy.arg['language']) - - language('c', {'language': 'c', 'flags': []}) - language('c++', {'language': 'c++', 'flags': []}) - - def test_set_language_stops_on_not_supported(self): - spy = Spy() - input = { - 'compiler': 'c', - 'flags': [], - 'file': 'test.java', - 'language': 'java' - } - self.assertIsNone(sut.language_check(input, spy.call)) - self.assertIsNone(spy.arg) - - def test_set_language_sets_flags(self): - def flags(expected, input): - spy = Spy() - input.update({'compiler': 'c', 'file': 'test.c'}) - self.assertEqual(spy.success, sut.language_check(input, spy.call)) - self.assertEqual(expected, spy.arg['flags']) - - flags(['-x', 'c'], {'language': 'c', 'flags': []}) - flags(['-x', 'c++'], {'language': 'c++', 'flags': []}) - - def test_set_language_from_filename(self): - def language(expected, input): - spy = Spy() - input.update({'language': None, 'flags': []}) - self.assertEqual(spy.success, sut.language_check(input, spy.call)) - self.assertEqual(expected, spy.arg['language']) - - language('c', 
{'file': 'file.c', 'compiler': 'c'}) - language('c++', {'file': 'file.c', 'compiler': 'c++'}) - language('c++', {'file': 'file.cxx', 'compiler': 'c'}) - language('c++', {'file': 'file.cxx', 'compiler': 'c++'}) - language('c++', {'file': 'file.cpp', 'compiler': 'c++'}) - language('c-cpp-output', {'file': 'file.i', 'compiler': 'c'}) - language('c++-cpp-output', {'file': 'file.i', 'compiler': 'c++'}) - - def test_arch_loop_sets_flags(self): - def flags(archs): - spy = Spy() - input = {'flags': [], 'arch_list': archs} - sut.arch_check(input, spy.call) - return spy.arg['flags'] - - self.assertEqual([], flags([])) - self.assertEqual(['-arch', 'i386'], flags(['i386'])) - self.assertEqual(['-arch', 'i386'], flags(['i386', 'ppc'])) - self.assertEqual(['-arch', 'sparc'], flags(['i386', 'sparc'])) - - def test_arch_loop_stops_on_not_supported(self): - def stop(archs): - spy = Spy() - input = {'flags': [], 'arch_list': archs} - self.assertIsNone(sut.arch_check(input, spy.call)) - self.assertIsNone(spy.arg) - - stop(['ppc']) - stop(['ppc64']) - - -@sut.require([]) -def method_without_expecteds(opts): - return 0 - - -@sut.require(['this', 'that']) -def method_with_expecteds(opts): - return 0 - - -@sut.require([]) -def method_exception_from_inside(opts): - raise Exception('here is one') - - -class RequireDecoratorTest(unittest.TestCase): - - def test_method_without_expecteds(self): - self.assertEqual(method_without_expecteds(dict()), 0) - self.assertEqual(method_without_expecteds({}), 0) - self.assertEqual(method_without_expecteds({'this': 2}), 0) - self.assertEqual(method_without_expecteds({'that': 3}), 0) - - def test_method_with_expecteds(self): - self.assertRaises(KeyError, method_with_expecteds, dict()) - self.assertRaises(KeyError, method_with_expecteds, {}) - self.assertRaises(KeyError, method_with_expecteds, {'this': 2}) - self.assertRaises(KeyError, method_with_expecteds, {'that': 3}) - self.assertEqual(method_with_expecteds({'this': 0, 'that': 3}), 0) - - def 
test_method_exception_not_caught(self): - self.assertRaises(Exception, method_exception_from_inside, dict()) Index: tools/scan-build-py/tests/unit/test_shell.py =================================================================== --- tools/scan-build-py/tests/unit/test_shell.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. - -import libscanbuild.shell as sut -import unittest - - -class ShellTest(unittest.TestCase): - - def test_encode_decode_are_same(self): - def test(value): - self.assertEqual(sut.encode(sut.decode(value)), value) - - test("") - test("clang") - test("clang this and that") - - def test_decode_encode_are_same(self): - def test(value): - self.assertEqual(sut.decode(sut.encode(value)), value) - - test([]) - test(['clang']) - test(['clang', 'this', 'and', 'that']) - test(['clang', 'this and', 'that']) - test(['clang', "it's me", 'again']) - test(['clang', 'some "words" are', 'quoted']) - - def test_encode(self): - self.assertEqual(sut.encode(['clang', "it's me", 'again']), - 'clang "it\'s me" again') - self.assertEqual(sut.encode(['clang', "it(s me", 'again)']), - 'clang "it(s me" "again)"') - self.assertEqual(sut.encode(['clang', 'redirect > it']), - 'clang "redirect > it"') - self.assertEqual(sut.encode(['clang', '-DKEY="VALUE"']), - 'clang -DKEY=\\"VALUE\\"') - self.assertEqual(sut.encode(['clang', '-DKEY="value with spaces"']), - 'clang -DKEY=\\"value with spaces\\"')