Index: tools/scan-build-py/.travis.yml =================================================================== --- /dev/null +++ tools/scan-build-py/.travis.yml @@ -0,0 +1,23 @@ +language: python + +python: + - 2.7 + - 3.2 + - 3.3 + - 3.4 + +before_install: + - echo "deb http://llvm.org/apt/precise/ llvm-toolchain-precise main" | sudo tee -a /etc/apt/sources.list + - echo "deb http://llvm.org/apt/precise/ llvm-toolchain-precise-3.6 main" | sudo tee -a /etc/apt/sources.list + - echo "deb http://ppa.launchpad.net/ubuntu-toolchain-r/test/ubuntu precise main" | sudo tee -a /etc/apt/sources.list + - sudo apt-get update -qq + +install: + - sudo apt-get install --allow-unauthenticated -qq clang-3.6 + - pip install pep8 + +script: + - pep8 libscanbuild tests + - python setup.py check + - python setup.py install + - python setup.py test Index: tools/scan-build-py/CHANGES.txt =================================================================== --- /dev/null +++ tools/scan-build-py/CHANGES.txt @@ -0,0 +1 @@ +v, -- Initial release. Index: tools/scan-build-py/LICENSE.txt =================================================================== --- /dev/null +++ tools/scan-build-py/LICENSE.txt @@ -0,0 +1,63 @@ +============================================================================== +LLVM Release License +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2007-2014 University of Illinois at Urbana-Champaign. +All rights reserved. 
+ +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. 
+ +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. + +The following pieces of software have additional or alternate copyrights, +licenses, and/or restrictions: + +Program Directory +------- --------- + + Index: tools/scan-build-py/MANIFEST.in =================================================================== --- /dev/null +++ tools/scan-build-py/MANIFEST.in @@ -0,0 +1,3 @@ +include README.md +include *.txt +recursive-include libear * Index: tools/scan-build-py/README.md =================================================================== --- /dev/null +++ tools/scan-build-py/README.md @@ -0,0 +1,76 @@ +[![Build Status](https://travis-ci.org/rizsotto/Beye.svg?branch=master)](https://travis-ci.org/rizsotto/Beye) +[![Coverage Status](https://coveralls.io/repos/rizsotto/Beye/badge.svg?branch=master)](https://coveralls.io/r/rizsotto/Beye?branch=master) + +Build EYE +========= + +It's a static analyzer wrapper for [Clang][CLANG]. The original `scan-build` +is written in Perl. This package contains reimplementation of that scripts +in Python. The reimplementation diverge from the original scripts in a few +places. + + [CLANG]: http://clang.llvm.org/ + +How to get +---------- + +Will be available soon from [the Python Package Index][PyPI]. + + [PyPI]: https://pypi.python.org/pypi + +How to build +------------ + +Should be quite portable on UNIX operating systems. It has been tested on +FreeBSD, GNU/Linux and OS X. + +### Prerequisites + +1. an ANSI **C compiler**, to compile the sources. +2. **cmake**, to configure the build process. +3. **make**, to run the build. The makefile is generated by `cmake`. +4. **python** interpreter (version 2.7, 3.2, 3.3, 3.4). 
+ +### Build commands + +Please consider to use `virtualenv` or other tool to set up the working +environment. + + $ python setup.py build + $ python setup.py install + $ python setup.py test + + +How to use +---------- + +To run the Clang static analyzer against a project goes like this: + + $ scan-build all + +To generate a compilation database file (compilation database is a JSON +file described [here][JCDB]) goes like this: + + $ scan-build intercept + +Use `--help` to know more about the commands. + + [JCDB]: http://clang.llvm.org/docs/JSONCompilationDatabase.html + +Known problems +-------------- + +Because it uses `LD_PRELOAD` or `DYLD_INSERT_LIBRARIES` environment variables, +it does not append to it, but overrides it. So builds which are using these +variables might not work. (I don't know any build tool which does that, but +please let me know if you do.) + +Problem reports +--------------- +If you find a bug in this documentation or elsewhere in the program or would +like to propose an improvement, please use the project's [github issue +tracker][ISSUES]. Please describing the bug and where you found it. If you +have a suggestion how to fix it, include that as well. Patches are also +welcome. + + [ISSUES]: https://github.com/rizsotto/Beye/issues Index: tools/scan-build-py/bin/scan-build =================================================================== --- /dev/null +++ tools/scan-build-py/bin/scan-build @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. 
+ +import sys +import multiprocessing +from libscanbuild.driver import main + + +if __name__ == '__main__': + multiprocessing.freeze_support() + sys.exit(main()) Index: tools/scan-build-py/libear/CMakeLists.txt =================================================================== --- /dev/null +++ tools/scan-build-py/libear/CMakeLists.txt @@ -0,0 +1,38 @@ +project(ear C) + +cmake_minimum_required(VERSION 2.8) +if (NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release") +endif() + +include(CheckCCompilerFlag) +check_c_compiler_flag("-std=c99" C99_SUPPORTED) +if (C99_SUPPORTED) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99") +endif() + +include(CheckFunctionExists) +include(CheckSymbolExists) + +add_definitions(-D_GNU_SOURCE) +list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE) + +check_function_exists(execve HAVE_EXECVE) +check_function_exists(execv HAVE_EXECV) +check_function_exists(execvpe HAVE_EXECVPE) +check_function_exists(execvp HAVE_EXECVP) +check_function_exists(execvP HAVE_EXECVP2) +check_function_exists(execl HAVE_EXECL) +check_function_exists(execlp HAVE_EXECLP) +check_function_exists(execle HAVE_EXECLE) +check_function_exists(posix_spawn HAVE_POSIX_SPAWN) +check_function_exists(posix_spawnp HAVE_POSIX_SPAWNP) +check_symbol_exists(_NSGetEnviron crt_externs.h HAVE_NSGETENVIRON) + +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/config.h) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +add_library(ear SHARED ear.c) +target_link_libraries(ear ${CMAKE_DL_LIBS}) + +install(TARGETS ear LIBRARY DESTINATION libscanbuild) Index: tools/scan-build-py/libear/config.h.in =================================================================== --- /dev/null +++ tools/scan-build-py/libear/config.h.in @@ -0,0 +1,33 @@ +/* -*- coding: utf-8 -*- +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+*/ + +#pragma once + +#cmakedefine HAVE_EXECVE +#cmakedefine HAVE_EXECV +#cmakedefine HAVE_EXECVPE +#cmakedefine HAVE_EXECVP +#cmakedefine HAVE_EXECVP2 +#cmakedefine HAVE_EXECL +#cmakedefine HAVE_EXECLP +#cmakedefine HAVE_EXECLE +#cmakedefine HAVE_POSIX_SPAWN +#cmakedefine HAVE_POSIX_SPAWNP +#cmakedefine HAVE_NSGETENVIRON + +#cmakedefine APPLE + +#define ENV_OUTPUT "BEAR_OUTPUT" + +#ifdef APPLE +# define ENV_FLAT "DYLD_FORCE_FLAT_NAMESPACE" +# define ENV_PRELOAD "DYLD_INSERT_LIBRARIES" +# define ENV_SIZE 3 +#else +# define ENV_PRELOAD "LD_PRELOAD" +# define ENV_SIZE 2 +#endif Index: tools/scan-build-py/libear/ear.c =================================================================== --- /dev/null +++ tools/scan-build-py/libear/ear.c @@ -0,0 +1,554 @@ +/* -*- coding: utf-8 -*- +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +*/ + +/** + * This file implements a shared library. This library can be pre-loaded by + * the dynamic linker of the Operating System (OS). It implements a few function + * related to process creation. By pre-load this library the executed process + * uses these functions instead of those from the standard library. + * + * The idea here is to inject a logic before call the real methods. The logic is + * to dump the call into a file. To call the real method this library is doing + * the job of the dynamic linker. + * + * The only input for the log writing is about the destination directory. + * This is passed as environment variable. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include + +#if defined HAVE_POSIX_SPAWN || defined HAVE_POSIX_SPAWNP +#include +#endif + +#if defined HAVE_NSGETENVIRON +#include +static char **environ; +#else +extern char **environ; +#endif + + +typedef char const * bear_env_t[ENV_SIZE]; + +static int bear_capture_env_t(bear_env_t *env); +static void bear_restore_env_t(bear_env_t *env); +static void bear_release_env_t(bear_env_t *env); +static char const **bear_update_environment(char *const envp[], bear_env_t *env); +static char const **bear_update_environ(char const **in, char const *key, char const *value); +static void bear_report_call(char const *fun, char const *const argv[]); +static char const **bear_strings_build(char const *arg, va_list *ap); +static char const **bear_strings_copy(char const **const in); +static char const **bear_strings_append(char const **in, char const *e); +static size_t bear_strings_length(char const *const *in); +static void bear_strings_release(char const **); + + +static bear_env_t env_names = + { ENV_OUTPUT + , ENV_PRELOAD +#ifdef ENV_FLAT + , ENV_FLAT +#endif + }; + +static bear_env_t initial_env = + { 0 + , 0 +#ifdef ENV_FLAT + , 0 +#endif + }; + +static int initialized = 0; + +static void on_load(void) __attribute__((constructor)); +static void on_unload(void) __attribute__((destructor)); + + +#define DLSYM(TYPE_, VAR_, SYMBOL_) \ + union { \ + void *from; \ + TYPE_ to; \ + } cast; \ + if (0 == (cast.from = dlsym(RTLD_NEXT, SYMBOL_))) { \ + perror("bear: dlsym"); \ + exit(EXIT_FAILURE); \ + } \ + TYPE_ const VAR_ = cast.to; + + +#ifdef HAVE_EXECVE +static int call_execve(const char *path, char *const argv[], + char *const envp[]); +#endif +#ifdef HAVE_EXECVP +static int call_execvp(const char *file, char *const argv[]); +#endif +#ifdef HAVE_EXECVPE +static int call_execvpe(const char *file, char *const argv[], + char *const envp[]); +#endif +#ifdef HAVE_EXECVP2 +static int 
call_execvP(const char *file, const char *search_path, + char *const argv[]); +#endif +#ifdef HAVE_POSIX_SPAWN +static int call_posix_spawn(pid_t *restrict pid, const char *restrict path, + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t *restrict attrp, + char *const argv[restrict], + char *const envp[restrict]); +#endif +#ifdef HAVE_POSIX_SPAWNP +static int call_posix_spawnp(pid_t *restrict pid, const char *restrict file, + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t *restrict attrp, + char *const argv[restrict], + char *const envp[restrict]); +#endif + + +/* Initialization method to Captures the relevant environment variables. + */ + +static void on_load(void) { +#ifdef HAVE_NSGETENVIRON + environ = *_NSGetEnviron(); +#endif + if (!initialized) + initialized = bear_capture_env_t(&initial_env); +} + +static void on_unload(void) { + bear_release_env_t(&initial_env); + initialized = 0; +} + + +/* These are the methods we are try to hijack. 
+ */ + +#ifdef HAVE_EXECVE +int execve(const char *path, char *const argv[], char *const envp[]) { + bear_report_call(__func__, (char const *const *)argv); + return call_execve(path, argv, envp); +} +#endif + +#ifdef HAVE_EXECV +#ifndef HAVE_EXECVE +#error can not implement execv without execve +#endif +int execv(const char *path, char *const argv[]) { + bear_report_call(__func__, (char const *const *)argv); + return call_execve(path, argv, environ); +} +#endif + +#ifdef HAVE_EXECVPE +int execvpe(const char *file, char *const argv[], char *const envp[]) { + bear_report_call(__func__, (char const *const *)argv); + return call_execvpe(file, argv, envp); +} +#endif + +#ifdef HAVE_EXECVP +int execvp(const char *file, char *const argv[]) { + bear_report_call(__func__, (char const *const *)argv); + return call_execvp(file, argv); +} +#endif + +#ifdef HAVE_EXECVP2 +int execvP(const char *file, const char *search_path, char *const argv[]) { + bear_report_call(__func__, (char const *const *)argv); + return call_execvP(file, search_path, argv); +} +#endif + +#ifdef HAVE_EXECL +#ifndef HAVE_EXECVE +#error can not implement execl without execve +#endif +int execl(const char *path, const char *arg, ...) { + va_list args; + va_start(args, arg); + char const **argv = bear_strings_build(arg, &args); + va_end(args); + + bear_report_call(__func__, (char const *const *)argv); + int const result = call_execve(path, (char *const *)argv, environ); + + bear_strings_release(argv); + return result; +} +#endif + +#ifdef HAVE_EXECLP +#ifndef HAVE_EXECVP +#error can not implement execlp without execvp +#endif +int execlp(const char *file, const char *arg, ...) 
{ + va_list args; + va_start(args, arg); + char const **argv = bear_strings_build(arg, &args); + va_end(args); + + bear_report_call(__func__, (char const *const *)argv); + int const result = call_execvp(file, (char *const *)argv); + + bear_strings_release(argv); + return result; +} +#endif + +#ifdef HAVE_EXECLE +#ifndef HAVE_EXECVE +#error can not implement execle without execve +#endif +// int execle(const char *path, const char *arg, ..., char * const envp[]); +int execle(const char *path, const char *arg, ...) { + va_list args; + va_start(args, arg); + char const **argv = bear_strings_build(arg, &args); + char const **envp = va_arg(args, char const **); + va_end(args); + + bear_report_call(__func__, (char const *const *)argv); + int const result = + call_execve(path, (char *const *)argv, (char *const *)envp); + + bear_strings_release(argv); + return result; +} +#endif + +#ifdef HAVE_POSIX_SPAWN +int posix_spawn(pid_t *restrict pid, const char *restrict path, + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t *restrict attrp, + char *const argv[restrict], char *const envp[restrict]) { + bear_report_call(__func__, (char const *const *)argv); + return call_posix_spawn(pid, path, file_actions, attrp, argv, envp); +} +#endif + +#ifdef HAVE_POSIX_SPAWNP +int posix_spawnp(pid_t *restrict pid, const char *restrict file, + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t *restrict attrp, + char *const argv[restrict], char *const envp[restrict]) { + bear_report_call(__func__, (char const *const *)argv); + return call_posix_spawnp(pid, file, file_actions, attrp, argv, envp); +} +#endif + +/* These are the methods which forward the call to the standard implementation. 
+ */ + +#ifdef HAVE_EXECVE +static int call_execve(const char *path, char *const argv[], + char *const envp[]) { + typedef int (*func)(const char *, char *const *, char *const *); + + DLSYM(func, fp, "execve"); + + char const **const menvp = bear_update_environment(envp, &initial_env); + int const result = (*fp)(path, argv, (char *const *)menvp); + bear_strings_release(menvp); + return result; +} +#endif + +#ifdef HAVE_EXECVPE +static int call_execvpe(const char *file, char *const argv[], + char *const envp[]) { + typedef int (*func)(const char *, char *const *, char *const *); + + DLSYM(func, fp, "execvpe"); + + char const **const menvp = bear_update_environment(envp, &initial_env); + int const result = (*fp)(file, argv, (char *const *)menvp); + bear_strings_release(menvp); + return result; +} +#endif + +#ifdef HAVE_EXECVP +static int call_execvp(const char *file, char *const argv[]) { + typedef int (*func)(const char *file, char *const argv[]); + + DLSYM(func, fp, "execvp"); + + bear_env_t current; + bear_capture_env_t(¤t); + bear_restore_env_t(&initial_env); + int const result = (*fp)(file, argv); + bear_restore_env_t(¤t); + bear_release_env_t(¤t); + + return result; +} +#endif + +#ifdef HAVE_EXECVP2 +static int call_execvP(const char *file, const char *search_path, + char *const argv[]) { + typedef int (*func)(const char *, const char *, char *const *); + + DLSYM(func, fp, "execvP"); + + bear_env_t current; + bear_capture_env_t(¤t); + bear_restore_env_t(&initial_env); + int const result = (*fp)(file, search_path, argv); + bear_restore_env_t(¤t); + bear_release_env_t(¤t); + + return result; +} +#endif + +#ifdef HAVE_POSIX_SPAWN +static int call_posix_spawn(pid_t *restrict pid, const char *restrict path, + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t *restrict attrp, + char *const argv[restrict], + char *const envp[restrict]) { + typedef int (*func)(pid_t *restrict, const char *restrict, + const posix_spawn_file_actions_t *, + const 
posix_spawnattr_t *restrict, + char *const *restrict, char *const *restrict); + + DLSYM(func, fp, "posix_spawn"); + + char const **const menvp = bear_update_environment(envp, &initial_env); + int const result = + (*fp)(pid, path, file_actions, attrp, argv, (char *const *restrict)menvp); + bear_strings_release(menvp); + return result; +} +#endif + +#ifdef HAVE_POSIX_SPAWNP +static int call_posix_spawnp(pid_t *restrict pid, const char *restrict file, + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t *restrict attrp, + char *const argv[restrict], + char *const envp[restrict]) { + typedef int (*func)(pid_t *restrict, const char *restrict, + const posix_spawn_file_actions_t *, + const posix_spawnattr_t *restrict, + char *const *restrict, char *const *restrict); + + DLSYM(func, fp, "posix_spawnp"); + + char const **const menvp = bear_update_environment(envp, &initial_env); + int const result = + (*fp)(pid, file, file_actions, attrp, argv, (char *const *restrict)menvp); + bear_strings_release(menvp); + return result; +} +#endif + +/* this method is to write log about the process creation. 
*/ + +static void bear_report_call(char const *fun, char const *const argv[]) { + static int const GS = 0x1d; + static int const RS = 0x1e; + static int const US = 0x1f; + + if (!initialized) + return; + + const char *cwd = getcwd(NULL, 0); + if (0 == cwd) { + perror("bear: getcwd"); + exit(EXIT_FAILURE); + } + char const * const out_dir = initial_env[0]; + size_t const path_max_length = strlen(out_dir) + 32; + char filename[path_max_length]; + if (-1 == snprintf(filename, path_max_length, "%s/%d.cmd", out_dir, getpid())) { + perror("bear: snprintf"); + exit(EXIT_FAILURE); + } + FILE * fd = fopen(filename, "a+"); + if (0 == fd) { + perror("bear: fopen"); + exit(EXIT_FAILURE); + } + fprintf(fd, "%d%c", getpid(), RS); + fprintf(fd, "%d%c", getppid(), RS); + fprintf(fd, "%s%c", fun, RS); + fprintf(fd, "%s%c", cwd, RS); + size_t const argc = bear_strings_length(argv); + for (size_t it = 0; it < argc; ++it) { + fprintf(fd, "%s%c", argv[it], US); + } + fprintf(fd, "%c", GS); + if (fclose(fd)) { + perror("bear: fclose"); + exit(EXIT_FAILURE); + } + free((void *)cwd); +} + +/* update environment assure that chilren processes will copy the desired + * behaviour */ + +static int bear_capture_env_t(bear_env_t *env) { + int status = 1; + for (size_t it = 0; it < ENV_SIZE; ++it) { + char const * const env_value = getenv(env_names[it]); + char const * const env_copy = (env_value) ? strdup(env_value) : env_value; + (*env)[it] = env_copy; + status &= (env_copy) ? 1 : 0; + } + return status; +} + +static void bear_restore_env_t(bear_env_t *env) { + for (size_t it = 0; it < ENV_SIZE; ++it) + if (((*env)[it]) + ? 
setenv(env_names[it], (*env)[it], 1) + : unsetenv(env_names[it])) { + perror("bear: setenv"); + exit(EXIT_FAILURE); + } +} + +static void bear_release_env_t(bear_env_t *env) { + for (size_t it = 0; it < ENV_SIZE; ++it) { + free((void *)(*env)[it]); + (*env)[it] = 0; + } +} + +static char const **bear_update_environment(char *const envp[], bear_env_t *env) { + char const **result = bear_strings_copy((char const **)envp); + for (size_t it = 0; it < ENV_SIZE && (*env)[it]; ++it) + result = bear_update_environ(result, env_names[it], (*env)[it]); + return result; +} + +static char const **bear_update_environ(char const *envs[], char const *key, char const * const value) { + // find the key if it's there + size_t const key_length = strlen(key); + char const **it = envs; + for (; (it) && (*it); ++it) { + if ((0 == strncmp(*it, key, key_length)) && + (strlen(*it) > key_length) && ('=' == (*it)[key_length])) + break; + } + // allocate a environment entry + size_t const value_length = strlen(value); + size_t const env_length = key_length + value_length + 2; + char *env = malloc(env_length); + if (0 == env) { + perror("bear: malloc [in env_update]"); + exit(EXIT_FAILURE); + } + if (-1 == snprintf(env, env_length, "%s=%s", key, value)) { + perror("bear: snprintf"); + exit(EXIT_FAILURE); + } + // replace or append the environment entry + if (it && *it) { + free((void *)*it); + *it = env; + return envs; + } + return bear_strings_append(envs, env); +} + +/* util methods to deal with string arrays. environment and process arguments + * are both represented as string arrays. 
*/ + +static char const **bear_strings_build(char const *const arg, va_list *args) { + char const **result = 0; + size_t size = 0; + for (char const *it = arg; it; it = va_arg(*args, char const *)) { + result = realloc(result, (size + 1) * sizeof(char const *)); + if (0 == result) { + perror("bear: realloc"); + exit(EXIT_FAILURE); + } + char const *copy = strdup(it); + if (0 == copy) { + perror("bear: strdup"); + exit(EXIT_FAILURE); + } + result[size++] = copy; + } + result = realloc(result, (size + 1) * sizeof(char const *)); + if (0 == result) { + perror("bear: realloc"); + exit(EXIT_FAILURE); + } + result[size++] = 0; + + return result; +} + +static char const **bear_strings_copy(char const **const in) { + size_t const size = bear_strings_length(in); + + char const **const result = malloc((size + 1) * sizeof(char const *)); + if (0 == result) { + perror("bear: malloc"); + exit(EXIT_FAILURE); + } + + char const **out_it = result; + for (char const *const *in_it = in; (in_it) && (*in_it); + ++in_it, ++out_it) { + *out_it = strdup(*in_it); + if (0 == *out_it) { + perror("bear: strdup"); + exit(EXIT_FAILURE); + } + } + *out_it = 0; + return result; +} + +static char const **bear_strings_append(char const **const in, + char const *const e) { + size_t size = bear_strings_length(in); + char const **result = realloc(in, (size + 2) * sizeof(char const *)); + if (0 == result) { + perror("bear: realloc"); + exit(EXIT_FAILURE); + } + result[size++] = e; + result[size++] = 0; + return result; +} + +static size_t bear_strings_length(char const *const *const in) { + size_t result = 0; + for (char const *const *it = in; (it) && (*it); ++it) + ++result; + return result; +} + +static void bear_strings_release(char const **in) { + for (char const *const *it = in; (it) && (*it); ++it) { + free((void *)*it); + } + free((void *)in); +} Index: tools/scan-build-py/libscanbuild/__init__.py =================================================================== --- /dev/null +++ 
tools/scan-build-py/libscanbuild/__init__.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" +This module responsible to run the Clang static analyzer against any build +and generate reports. + +This work is derived from the original 'scan-build' Perl implementation and +from an independent project 'bear'. For history let me record how the Perl +implementation was working. Then explain how is it now. + +============================= +Perl implementation internals +============================= + +There were two major parts of the original design. The compiler wrappers +('ccc-analyzer' and 'c++-analyzer') and the driver ('scan-build'). +De facto 'CC' and 'CXX' environment variables are the standard way to make +your build configurable on compilers. (A build might respect this convention +or not.) When the driver started it overrides the 'CC' and 'CXX' environment +variables with the wrapper files and start the build. The wrappers are doing +their jobs (explained later) generates the desired output files and the +reports. Then the driver goes through on the reports and generates a "cover" +for it. + +As you can see the driver is the only interface for the user. The wrappers +are doing the real work. The communication between the two parts is done via +environment variables. The driver not only set the 'CC' and 'CXX', but many +others as well. + +As the wrappers called as compilers. These should do behave like a compiler. +So, it calls the real compiler (it choose from environment variables, depends +from the OS type). This step generates the desired output file, so the build +can carry on. The exit code of the compilation is saved to be the exit code +of the wrapper. Then it execute the analyzer if that is needed. 
This is a +complex logic: it parses the command line arguments, it picks the needed +arguments to run the analyzer or decide to run or not. And it executes the +analyzer and exit. + +The static analyzer is inside the Clang binary, can be triggered by special +command line argument. To run the analyzer against a single file, +wrapper collect arguments from the the current command line. And also +make arguments from the driver's command line parameters (which were +passed as environment variables). + +If the analyzer fails, then the wrapper generates error report. This is +optional, but when it triggered then those go into the "cover". + +=========================== +Current implementation idea +=========================== + +The current design address these tasks separably. The major split is between +to capture the compilations and record it into a compilation database, and run +the analyzer against all file in the compilation database. + +To capture the compiler invocation can be done as the Perl implementation +was doing. (To override the 'CC' and 'CXX' variables.) But that depends on the +build process do respect those variables or not. For better coverage 'bear' +was using the pre-load feature of the OS dynamic linker. Details explained +later, the point here is to generate the compilation database can be done in +multiple ways, but keep the compilation database as an interface between these +two steps. + +To run the analyzer against the entire project is more easier. It could be done +in a single executable (no need to pass environment variables between +processes) and parallelism can be exploited. The analyzer execution is also +implemented in splits. As earlier explained, a single analyzer run depends from +these two factors: the command line parameters of the 'scan-build' and the +command line parameter of the individual compilation. So steps like generate a +command to analyzer, execute it can be done parallel. 
Then to collect the +outputs and generate the "cover" also can be divided and make it parallel. + +For more please check the individual modules. """ + + +def duplicate_check(method): + """ Predicate to detect duplicated entries. + + Unique hash method can be use to detect duplicates. Entries are + represented as dictionaries, which has no default hash method. + This implementation uses a set datatype to store the unique hash values. + + This method returns a method which can detect the duplicate values. """ + + def predicate(entry): + entry_hash = predicate.unique(entry) + if entry_hash not in predicate.state: + predicate.state.add(entry_hash) + return False + return True + + predicate.unique = method + predicate.state = set() + return predicate + + +def tempdir(): + """ Return the default temorary directory. """ + + from os import getenv + return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp'))) Index: tools/scan-build-py/libscanbuild/clang.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/clang.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" This module is responsible for the Clang executable. + +Since Clang command line interface is so rich, but this project is using only +a subset of that, it makes sense to create a function specific wrapper. """ + +import subprocess +import logging +import re +import shlex + +__all__ = ['get_version', 'get_arguments', 'get_checkers'] + + +def get_version(cmd): + """ Returns the compiler version as string. """ + + lines = subprocess.check_output([cmd, '-v'], stderr=subprocess.STDOUT) + return lines.decode('ascii').splitlines()[0] + + +def get_arguments(cwd, command): + """ Capture Clang invocation. 
+ + Clang can be executed directly (when you just ask specific action to + execute) or indirect way (whey you first ask Clang to print the command + to run for that compilation, and then execute the given command). + + This method receives the full command line for direct compilation. And + it generates the command for indirect compilation. """ + + def lastline(stream): + last = None + for line in stream: + last = line + if last is None: + raise Exception("output not found") + return last + + def strip_quotes(quoted): + match = re.match(r'^\"([^\"]*)\"$', quoted) + return match.group(1) if match else quoted + + cmd = command[:] + cmd.insert(1, '-###') + logging.debug('exec command in %s: %s', cwd, ' '.join(cmd)) + child = subprocess.Popen(cmd, + cwd=cwd, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + line = lastline(child.stdout) + child.stdout.close() + child.wait() + if 0 == child.returncode: + if re.match(r'^clang: error:', line): + raise Exception(line) + return [strip_quotes(x) for x in shlex.split(line)] + else: + raise Exception(line) + + +def get_active_checkers(clang, plugins): + """ To get the default plugins we execute Clang to print how this + compilation would be called. + + For input file we specify stdin and pass only language information. """ + + def checkers(language): + """ Returns a list of active checkers for the given language. """ + + load = [elem for plugin in plugins for elem in ['-Xclang', '-load', + '-Xclang', plugin]] + cmd = [clang, '--analyze'] + load + ['-x', language, '-'] + pattern = re.compile(r'^-analyzer-checker=(.*)$') + return [pattern.match(arg).group(1) for arg in get_arguments('.', cmd) + if pattern.match(arg)] + + result = set() + for language in ['c', 'c++', 'objective-c', 'objective-c++']: + result.update(checkers(language)) + return result + + +def get_checkers(clang, plugins): + """ Get all the available checkers from default and from the plugins. 
+
+    clang -- the compiler we are using
+    plugins -- list of plugins which was requested by the user
+
+    This method returns a dictionary of all available checkers and status.
+
+    {<checker_name>: (<description>, <is_active>)} """
+
+    plugins = plugins if plugins else []
+
+    def parse_checkers(stream):
+        """ Parse clang -analyzer-checker-help output.
+
+        Below the line 'CHECKERS:' are there the name description pairs.
+        Many of them are in one line, but some long named plugins has the
+        name and the description in separate lines.
+
+        The plugin name is always prefixed with two space character. The
+        name contains no whitespaces. Then followed by newline (if it's
+        too long) or other space characters comes the description of the
+        plugin. The description ends with a newline character. """
+
+        # find checkers header
+        for line in stream:
+            if re.match(r'^CHECKERS:', line):
+                break
+        # find entries
+        state = None
+        for line in stream:
+            if state and not re.match(r'^\s\s\S', line):
+                yield (state, line.strip())
+                state = None
+            elif re.match(r'^\s\s\S+$', line.rstrip()):
+                state = line.strip()
+            else:
+                pattern = re.compile(r'^\s\s(?P<key>\S*)\s*(?P<value>.*)')
+                match = pattern.match(line.rstrip())
+                if match:
+                    current = match.groupdict()
+                    yield (current['key'], current['value'])
+
+    def is_active(actives, entry):
+        """ Returns true if plugin name is matching the active plugin names.
+
+        actives -- set of active plugin names (or prefixes).
+        entry -- the current plugin name to judge.
+
+        The active plugin names are specific plugin names or prefix of some
+        names. One example for prefix, when it say 'unix' and it shall match
+        on 'unix.API', 'unix.Malloc' and 'unix.MallocSizeof'. 
""" + + return any(re.match(r'^' + a + r'(\.|$)', entry) for a in actives) + + actives = get_active_checkers(clang, plugins) + + load = [elem for plugin in plugins for elem in ['-load', plugin]] + cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help'] + + logging.debug('exec command: %s', ' '.join(cmd)) + child = subprocess.Popen(cmd, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + checkers = { + k: (v, is_active(actives, k)) + for k, v in parse_checkers(child.stdout) + } + child.stdout.close() + child.wait() + if 0 == child.returncode and len(checkers): + return checkers + else: + raise Exception('Could not query Clang for available checkers.') Index: tools/scan-build-py/libscanbuild/command.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/command.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" This module is responsible for to parse a compiler invocation. """ + +import re + +__all__ = ['Action', 'classify_parameters'] + + +class Action(object): + """ Enumeration class for compiler action. """ + + Link, Compile, Preprocess, Info, Internal = range(5) + + +def classify_parameters(command): + """ Parses the command line arguments of the given invocation. + + To run analysis from a compilation command, first it disassembles the + compilation command. Classifies the parameters into groups and throws + away those which are not relevant. """ + + def match(state, iterator): + """ This method contains a list of pattern and action tuples. + The matching start from the top if the list, when the first match + happens the action is executed. """ + + def regex(pattern, action): + """ Matching expression for regex. 
""" + + def evaluate(iterator): + match = evaluate.regexp.match(iterator.current()) + if match: + action(state, iterator, match) + return True + + evaluate.regexp = re.compile(pattern) + return evaluate + + def anyof(opts, action): + """ Matching expression for string literals. """ + + def evaluate(iterator): + if iterator.current() in opts: + action(state, iterator, None) + return True + + return evaluate + + tasks = [ + # actions + regex(r'^-(E|MM?)$', take_action(Action.Preprocess)), + anyof({'-c'}, take_action(Action.Compile)), + anyof({'-print-prog-name'}, take_action(Action.Info)), + anyof({'-cc1'}, take_action(Action.Internal)), + # architectures + anyof({'-arch'}, take_two('archs_seen')), + # module names + anyof({'-filelist'}, take_from_file('files')), + regex(r'^[^-].+', take_one('files')), + # language + anyof({'-x'}, take_second('language')), + # output + anyof({'-o'}, take_second('output')), + # relevant compiler flags + anyof({'-write-strings', '-v'}, take_one('compile_options')), + anyof({'-ftrapv-handler', '--sysroot', '-target'}, + take_two('compile_options')), + regex(r'^-isysroot', take_two('compile_options')), + regex(r'^-m(32|64)$', take_one('compile_options')), + regex(r'^-mios-simulator-version-min(.*)', + take_joined('compile_options')), + regex(r'^-stdlib(.*)', take_joined('compile_options')), + regex(r'^-mmacosx-version-min(.*)', + take_joined('compile_options')), + regex(r'^-miphoneos-version-min(.*)', + take_joined('compile_options')), + regex(r'^-O[1-3]$', take_one('compile_options')), + anyof({'-O'}, take_as('-O1', 'compile_options')), + anyof({'-Os'}, take_as('-O2', 'compile_options')), + regex(r'^-[DIU](.*)$', take_joined('compile_options')), + regex(r'^-isystem(.*)$', take_joined('compile_options')), + anyof({'-nostdinc'}, take_one('compile_options')), + regex(r'^-std=', take_one('compile_options')), + regex(r'^-include', take_two('compile_options')), + anyof({ + '-idirafter', '-imacros', '-iprefix', '-iwithprefix', + 
'-iwithprefixbefore' + }, take_two('compile_options')), + regex(r'^-m.*', take_one('compile_options')), + regex(r'^-iquote(.*)', take_joined('compile_options')), + regex(r'^-Wno-', take_one('compile_options')), + # ignored flags + regex(r'^-framework$', take_two()), + regex(r'^-fobjc-link-runtime(.*)', take_joined()), + regex(r'^-[lL]', take_one()), + regex(r'^-M[TF]$', take_two()), + regex(r'^-[eu]$', take_two()), + anyof({'-fsyntax-only', '-save-temps'}, take_one()), + anyof({ + '-install_name', '-exported_symbols_list', '-current_version', + '-compatibility_version', '-init', '-seg1addr', + '-bundle_loader', '-multiply_defined', '--param', + '--serialize-diagnostics' + }, take_two()), + anyof({'-sectorder'}, take_four()), + # relevant compiler flags + regex(r'^-[fF](.+)$', take_one('compile_options')) + ] + for task in tasks: + if task(iterator): + return + + state = {'action': Action.Link, 'cxx': is_cplusplus_compiler(command[0])} + + arguments = Arguments(command) + for _ in arguments: + match(state, arguments) + return state + + +class Arguments(object): + """ An iterator wraper around compiler arguments. + + Python iterators are only implement the 'next' method, but this one + implements the 'current' query method as well. """ + + def __init__(self, args): + """ Takes full command line, but iterates on the parameters only. """ + + self.__sequence = [arg for arg in args[1:] if arg != ''] + self.__size = len(self.__sequence) + self.__current = -1 + + def __iter__(self): + """ Needed for python iterator. """ + + return self + + def __next__(self): + """ Needed for python iterator. (version 3.x) """ + + return self.next() + + def next(self): + """ Needed for python iterator. (version 2.x) """ + + self.__current += 1 + return self.current() + + def current(self): + """ Extra method to query the current element. 
""" + + if self.__current >= self.__size: + raise StopIteration + else: + return self.__sequence[self.__current] + + +def take_n(count=1, *keys): + """ Take N number of arguments and append it to the refered values. """ + + def take(values, iterator, _match): + updates = [] + updates.append(iterator.current()) + for _ in range(count - 1): + updates.append(iterator.next()) + for key in keys: + current = values.get(key, []) + values.update({key: current + updates}) + + return take + + +def take_one(*keys): + """ Take one argument and append to the 'key' values. """ + + return take_n(1, *keys) + + +def take_two(*keys): + """ Take two arguments and append to the 'key' values. """ + + return take_n(2, *keys) + + +def take_four(*keys): + """ Take four arguments and append to the 'key' values. """ + + return take_n(4, *keys) + + +def take_joined(*keys): + """ Take one or two arguments and append to the 'key' values. + + eg.: '-Isomething' shall take only one. + '-I something' shall take two. + + This action should go with regex matcher only. """ + + def take(values, iterator, match): + updates = [] + updates.append(iterator.current()) + if not match.group(1): + updates.append(iterator.next()) + for key in keys: + current = values.get(key, []) + values.update({key: current + updates}) + + return take + + +def take_from_file(*keys): + """ Take values from the refered file and append to the 'key' values. + + The refered file is the second argument. (So it consume two args.) """ + + def take(values, iterator, _match): + with open(iterator.next()) as handle: + current = [line.strip() for line in handle.readlines()] + for key in keys: + values[key] = current + + return take + + +def take_as(value, *keys): + """ Take one argument and append to the 'key' values. + + But instead of taking the argument, it takes the value as it was given. 
""" + + def take(values, _iterator, _match): + updates = [value] + for key in keys: + current = values.get(key, []) + values.update({key: current + updates}) + + return take + + +def take_second(*keys): + """ Take the second argument and append to the 'key' values. """ + + def take(values, iterator, _match): + current = iterator.next() + for key in keys: + values[key] = current + + return take + + +def take_action(action): + """ Take the action value and overwrite current value if that's bigger. """ + + def take(values, _iterator, _match): + key = 'action' + current = values[key] + values[key] = max(current, action) + + return take + + +def is_cplusplus_compiler(name): + """ Returns true when the compiler name refer to a C++ compiler. """ + + match = re.match(r'^([^/]*/)*(\w*-)*(\w+\+\+)(-(\d+(\.\d+){0,3}))?$', name) + return False if match is None else True Index: tools/scan-build-py/libscanbuild/driver.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/driver.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" This module implements the 'scan-build' command API. + +To run the static analyzer against a build is done in multiple steps: + + -- Intercept: capture the compilation command during the build, + -- Analyze: run the analyzer against the captured commands, + -- Report: create a cover report from the analyzer outputs. 
""" + +import logging +import sys +import re +import os +import os.path +import time +import json +import tempfile +import multiprocessing +from libscanbuild import tempdir +from libscanbuild.runner import run +from libscanbuild.intercept import capture +from libscanbuild.options import create_parser +from libscanbuild.report import document +from libscanbuild.clang import get_checkers + +__all__ = ['main'] + + +def main(): + """ Entry point for 'scan-build'. """ + + try: + parser = create_parser() + args = parser.parse_args() + validate(parser, args) + # setup logging + initialize_logging(args) + logging.debug('Parsed arguments: %s', args) + + # run build command and capture compiler executions + exit_code = capture(args) if args.action in {'all', 'intercept'} else 0 + # when we only do interception the job is done + if args.action == 'intercept': + return exit_code + + # next step to run the analyzer against the captured commands + with ReportDirectory(args.output, args.keep_empty) as target_dir: + run_analyzer(args, target_dir.name) + # cover report generation and bug counting + number_of_bugs = document(args, target_dir.name) + # remove the compilation database when it was not requested + if args.action == 'all' and os.path.exists(args.cdb): + os.unlink(args.cdb) + # set exit status as it was requested + return number_of_bugs if args.status_bugs else exit_code + except KeyboardInterrupt: + return 1 + except Exception: + logging.exception("Something unexpected had happened.") + return 127 + + +def initialize_logging(args): + """ Logging format controlled by the 'verbose' command line argument. 
""" + + fmt_string = '{0}: %(levelname)s: %(message)s' + + if 0 == args.verbose: + level = logging.WARNING + elif 1 == args.verbose: + level = logging.INFO + elif 2 == args.verbose: + level = logging.DEBUG + else: + level = logging.DEBUG + fmt_string = '{0}: %(levelname)s: %(funcName)s: %(message)s' + + program = os.path.basename(sys.argv[0]) + logging.basicConfig(format=fmt_string.format(program), level=level) + + +def validate(parser, args): + """ Validation done by the parser itself, but semantic check still + needs to be done. This method is doing that. """ + + if not args.action: + parser.error('missing action') + + if args.action in {'all', 'analyze'}: + if args.help_checkers_verbose: + print_checkers(get_checkers(args.clang, args.plugins)) + parser.exit() + elif args.help_checkers: + print_active_checkers(get_checkers(args.clang, args.plugins)) + parser.exit() + + if args.action in {'all', 'intercept'} and not args.build: + parser.error('missing build command') + + +def run_analyzer(args, output_dir): + """ Runs the analyzer against the given compilation database. """ + + def exclude(filename): + """ Return true when any excluded directory prefix the filename. 
""" + return any(re.match(r'^' + directory, filename) for directory + in args.excludes) + + consts = { + 'clang': args.clang, + 'output_dir': output_dir, + 'output_format': args.output_format, + 'report_failures': args.report_failures, + 'direct_args': analyzer_params(args) + } + + with open(args.cdb, 'r') as handle: + generator = (dict(cmd, **consts) for cmd in json.load(handle) + if not exclude(cmd['file'])) + # when verbose output requested execute sequentially + pool = multiprocessing.Pool(1 if 2 < args.verbose else None) + for current in pool.imap_unordered(run, generator): + if current is not None: + # display error message from the static analyzer + for line in current['error_output']: + logging.info(line.rstrip()) + pool.close() + pool.join() + + +def analyzer_params(args): + """ A group of command line arguments can mapped to command + line arguments of the analyzer. This method generates those. """ + + def prefix_with(constant, pieces): + """ From a sequence create another sequence where every second element + is from the original sequence and the odd elements are the prefix. 
+ + eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """ + + return [elem for piece in pieces for elem in [constant, piece]] + + result = [] + + if args.store_model: + result.append('-analyzer-store={0}'.format(args.store_model)) + if args.constraints_model: + result.append( + '-analyzer-constraints={0}'.format(args.constraints_model)) + if args.internal_stats: + result.append('-analyzer-stats') + if args.analyze_headers: + result.append('-analyzer-opt-analyze-headers') + if args.stats: + result.append('-analyzer-checker=debug.Stats') + if args.maxloop: + result.extend(['-analyzer-max-loop', str(args.maxloop)]) + if args.output_format: + result.append('-analyzer-output={0}'.format(args.output_format)) + if args.analyzer_config: + result.append(args.analyzer_config) + if 2 <= args.verbose: + result.append('-analyzer-display-progress') + if args.plugins: + result.extend(prefix_with('-load', args.plugins)) + if args.enable_checker: + result.extend(prefix_with('-analyzer-checker', args.enable_checker)) + if args.disable_checker: + result.extend( + prefix_with('-analyzer-disable-checker', args.disable_checker)) + if os.getenv('UBIVIZ'): + result.append('-analyzer-viz-egraph-ubigraph') + + return prefix_with('-Xclang', result) + + +def print_active_checkers(checkers): + """ Print active checkers to stdout. """ + + for name in sorted(name for name, (_, active) in checkers.items() + if active): + print(name) + + +def print_checkers(checkers): + """ Print verbose checker help to stdout. 
""" + + print('') + print('available checkers:') + print('') + for name in sorted(checkers.keys()): + description, active = checkers[name] + prefix = '+' if active else ' ' + if len(name) > 30: + print(' {0} {1}'.format(prefix, name)) + print(' ' * 35 + description) + else: + print(' {0} {1: <30} {2}'.format(prefix, name, description)) + print('') + print('NOTE: "+" indicates that an analysis is enabled by default.') + print('') + + +class ReportDirectory(object): + """ Responsible for the report directory. + + hint -- could specify the parent directory of the output directory. + keep -- a boolean value to keep or delete the empty report directory. """ + + def __init__(self, hint, keep): + self.name = ReportDirectory._create(hint) + self.keep = keep + + def __enter__(self): + return self + + def __exit__(self, _type, _value, _traceback): + if os.listdir(self.name): + msg = "Run 'scan-view %s' to examine bug reports." + self.keep = True + else: + if self.keep: + msg = "Report directory '%s' contans no report, but kept." + else: + msg = "Removing directory '%s' because it contains no report." + logging.warning(msg, self.name) + + if not self.keep: + os.rmdir(self.name) + + @staticmethod + def _create(hint): + if tempdir() != hint: + try: + os.mkdir(hint) + return hint + except OSError: + raise + else: + stamp = time.strftime('%Y-%m-%d-%H%M%S', time.localtime()) + return tempfile.mkdtemp(prefix='scan-build-{0}-'.format(stamp)) Index: tools/scan-build-py/libscanbuild/intercept.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/intercept.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" This module is responsible to capture the compiler invocation of any +build process. The result of that should be a compilation database. 
+ +This implementation is using the LD_PRELOAD or DYLD_INSERT_LIBRARIES +mechanisms provided by the dynamic linker. The related library is implemented +in C language and can be found under 'libear' directory. + +The 'libear' library is capturing all child process creation and logging the +relevant information about it into separate files in a specified directory. +The parameter of this process is the output directory name, where the report +files shall be placed. This parameter is passed as an environment variable. + +The module implements the build command execution with the 'libear' library +and the post-processing of the output files, which will condensates into a +(might be empty) compilation database. """ + +import logging +import subprocess +import json +import sys +import os +import os.path +import re +import shlex +import pkg_resources +import itertools +from libscanbuild import duplicate_check, tempdir +from libscanbuild.command import Action, classify_parameters + +__all__ = ['capture'] + +if 'darwin' == sys.platform: + ENVIRONMENTS = [("ENV_OUTPUT", "BEAR_OUTPUT"), + ("ENV_PRELOAD", "DYLD_INSERT_LIBRARIES"), + ("ENV_FLAT", "DYLD_FORCE_FLAT_NAMESPACE")] +else: + ENVIRONMENTS = [("ENV_OUTPUT", "BEAR_OUTPUT"), + ("ENV_PRELOAD", "LD_PRELOAD")] + + +def capture(args): + """ The entry point of build command interception. """ + + def post_processing(commands): + # run post processing only if that was requested + if 'raw_entries' not in args or not args.raw_entries: + # create entries from the current run + current = itertools.chain.from_iterable( + # creates a sequence of entry generators from an exec, + # but filter out non compiler calls before. 
+ (format_entry(x) for x in commands if is_compiler_call(x))) + # read entries from previous run + if 'append' in args and args.append and os.path.exists(args.cdb): + with open(args.cdb) as handle: + previous = iter(json.load(handle)) + else: + previous = iter([]) + # filter out duplicate entries from both + duplicate = duplicate_check(entry_hash) + return (entry for entry in itertools.chain(previous, current) + if os.path.exists(entry['file']) and not duplicate(entry)) + return commands + + with TemporaryDirectory(prefix='bear-', dir=tempdir()) as tmpdir: + # run the build command + exit_code = run_build(args.build, tmpdir) + logging.debug('build finished with exit code: %d', exit_code) + # read the intercepted exec calls + commands = (parse_exec_trace(os.path.join(tmpdir, filename)) + for filename in sorted(os.listdir(tmpdir))) + # do post processing + entries = post_processing(itertools.chain.from_iterable(commands)) + # dump the compilation database + with open(args.cdb, 'w+') as handle: + json.dump(list(entries), handle, sort_keys=True, indent=4) + return exit_code + + +def run_build(command, destination): + """ Runs the original build command. + + It sets the required environment variables and execute the given command. + The exec calls will be logged by the 'libear' preloaded library. """ + + lib_name = 'libear.dylib' if 'darwin' == sys.platform else 'libear.so' + ear_so_file = pkg_resources.resource_filename('libscanbuild', lib_name) + + environment = dict(os.environ) + for alias, key in ENVIRONMENTS: + value = '1' + if alias == 'ENV_PRELOAD': + value = ear_so_file + elif alias == 'ENV_OUTPUT': + value = destination + environment.update({key: value}) + + return subprocess.call(command, env=environment) + + +def parse_exec_trace(filename): + """ Parse the file generated by the 'libear' preloaded library. + + Given filename points to a file which contains the basic report + generated by the interception library or wrapper command. 
A single + report file _might_ contain multiple process creation info. """ + + GS = chr(0x1d) + RS = chr(0x1e) + US = chr(0x1f) + with open(filename, 'r') as handler: + content = handler.read() + for group in filter(bool, content.split(GS)): + records = group.split(RS) + yield { + 'pid': records[0], + 'ppid': records[1], + 'function': records[2], + 'directory': records[3], + 'command': records[4].split(US)[:-1] + } + + +def format_entry(entry): + """ Generate the desired fields for compilation database entries. """ + + def join_command(args): + return ' '.join([shell_escape(arg) for arg in args]) + + def abspath(cwd, name): + """ Create normalized absolute path from input filename. """ + fullname = name if os.path.isabs(name) else os.path.join(cwd, name) + return os.path.normpath(fullname) + + atoms = classify_parameters(entry['command']) + if atoms['action'] <= Action.Compile: + for filename in atoms.get('files', []): + if is_source_file(filename): + yield { + 'directory': entry['directory'], + 'command': join_command(entry['command']), + 'file': abspath(entry['directory'], filename) + } + + +def shell_escape(arg): + """ Create a single string from list. + + The major challenge, to deal with white spaces. Which are used by + the shell as separator. (Eg.: -D_KEY="Value with spaces") """ + + def quote(arg): + table = {'\\': '\\\\', '"': '\\"', "'": "\\'"} + return '"' + ''.join([table.get(c, c) for c in arg]) + '"' + + return quote(arg) if len(shlex.split(arg)) > 1 else arg + + +def is_source_file(filename): + """ A predicate to decide the filename is a source file or not. """ + + accepted = { + '.c', '.C', '.cc', '.CC', '.cxx', '.cp', '.cpp', '.c++', '.m', '.mm', + '.i', '.ii', '.mii' + } + _, ext = os.path.splitext(filename) + return ext in accepted + + +def is_compiler_call(entry): + """ A predicate to decide the entry is a compiler call or not. 
""" + + patterns = [ + re.compile(r'^([^/]*/)*c(c|\+\+)$'), + re.compile(r'^([^/]*/)*([^-]*-)*g(cc|\+\+)(-\d+(\.\d+){0,2})?$'), + re.compile(r'^([^/]*/)*([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'), + re.compile(r'^([^/]*/)*llvm-g(cc|\+\+)$'), + ] + executable = entry['command'][0] + return any((pattern.match(executable) for pattern in patterns)) + + +def entry_hash(entry): + """ Implement unique hash method for compilation database entries. """ + + # For faster lookup in set filename is reverted + filename = entry['file'][::-1] + # For faster lookup in set directory is reverted + directory = entry['directory'][::-1] + # On OS X the 'cc' and 'c++' compilers are wrappers for + # 'clang' therefore both call would be logged. To avoid + # this the hash does not contain the first word of the + # command. + command = ' '.join(shlex.split(entry['command'])[1:]) + + return '<>'.join([filename, directory, command]) + + +if sys.version_info.major >= 3 and sys.version_info.minor >= 2: + from tempfile import TemporaryDirectory +else: + + class TemporaryDirectory(object): + """ This function creates a temporary directory using mkdtemp() (the + supplied arguments are passed directly to the underlying function). + The resulting object can be used as a context manager. On completion + of the context or destruction of the temporary directory object the + newly created temporary directory and all its contents are removed + from the filesystem. 
""" + + def __init__(self, **kwargs): + from tempfile import mkdtemp + self.name = mkdtemp(**kwargs) + + def __enter__(self): + return self.name + + def __exit__(self, _type, _value, _traceback): + self.cleanup() + + def cleanup(self): + from shutil import rmtree + if self.name is not None: + rmtree(self.name) Index: tools/scan-build-py/libscanbuild/options.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/options.py @@ -0,0 +1,237 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" This module implements a command line parser based on argparse. + +Since 'argparse' module is available only 2.7 and afterwards, this is +the major force to be compatible with newer versions only. """ + +import argparse +from libscanbuild import tempdir + +__all__ = ['create_parser'] + + +def create_parser(): + """ Parser factory method. """ + + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers( + dest='action', + help="""Run static analyzer against a build is done in multiple steps. 
+            This controls which steps to take.""")
+
+    everything = subparsers.add_parser(
+        'all',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        help="""Run the static analyzer against the given
+            build command.""")
+
+    common_parameters(everything)
+    analyze_parameters(everything)
+    build_command(everything)
+
+    intercept = subparsers.add_parser(
+        'intercept',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        help="""Only runs the build and write compilation database.""")
+
+    common_parameters(intercept)
+    intercept_parameters(intercept)
+    build_command(intercept)
+
+    analyze = subparsers.add_parser(
+        'analyze',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        help="""Only run the static analyzer against the given
+            compilation database.""")
+
+    common_parameters(analyze)
+    analyze_parameters(analyze)
+
+    return parser
+
+
+def common_parameters(parser):
+    parser.add_argument(
+        '--verbose', '-v',
+        action='count',
+        default=0,
+        help="""Enable verbose output from '%(prog)s'. A second and third
+            '-v' increases verbosity.""")
+    parser.add_argument('--cdb',
+                        metavar='<file>',
+                        default="compile_commands.json",
+                        help="""The JSON compilation database.""")
+
+
+def build_command(parser):
+    parser.add_argument(dest='build',
+                        nargs=argparse.REMAINDER,
+                        help="""Command to run.""")
+
+
+def intercept_parameters(parser):
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument(
+        '--append',
+        action='store_true',
+        help="""Append new entries to existing compilation database.""")
+    group.add_argument('--disable-filter', '-n',
+                       dest='raw_entries',
+                       action='store_true',
+                       help="""Disable filter, unformated output.""")
+
+
+def analyze_parameters(parser):
+    parser.add_argument(
+        '--output', '-o',
+        metavar='<path>',
+        default=tempdir(),
+        help="""Specifies the output directory for analyzer reports.
+            Subdirectory will be created if default directory is targeted. 
+ """) + parser.add_argument( + '--status-bugs', + action='store_true', + help="""By default, the exit status of '%(prog)s' is the same as the + executed build command. Specifying this option causes the exit + status of '%(prog)s' to be non zero if it found potential bugs + and zero otherwise.""") + parser.add_argument('--html-title', + metavar='', + help="""Specify the title used on generated HTML pages. + If not specified, a default title will be used.""") + parser.add_argument( + '--analyze-headers', + action='store_true', + help="""Also analyze functions in #included files. By default, such + functions are skipped unless they are called by functions + within the main source file.""") + format_group = parser.add_mutually_exclusive_group() + format_group.add_argument( + '--plist', + dest='output_format', + const='plist', + default='html', + action='store_const', + help="""This option outputs the results as a set of .plist files.""") + format_group.add_argument( + '--plist-html', + dest='output_format', + const='plist-html', + default='html', + action='store_const', + help="""This option outputs the results as a set of .html and .plist + files.""") + # TODO: implement '-view ' + + advanced = parser.add_argument_group('advanced options') + advanced.add_argument( + '--keep-empty', + action='store_true', + help="""Don't remove the build results directory even if no issues + were reported.""") + advanced.add_argument( + '--no-failure-reports', + dest='report_failures', + action='store_false', + help="""Do not create a 'failures' subdirectory that includes analyzer + crash reports and preprocessed source files.""") + advanced.add_argument( + '--stats', + action='store_true', + help="""Generates visitation statistics for the project being analyzed. 
+ """) + advanced.add_argument('--internal-stats', + action='store_true', + help="""Generate internal analyzer statistics.""") + advanced.add_argument( + '--maxloop', + metavar='<loop count>', + type=int, + default=4, + help="""Specifiy the number of times a block can be visited before + giving up. Increase for more comprehensive coverage at a cost + of speed.""") + advanced.add_argument('--store', + metavar='<model>', + dest='store_model', + default='region', + choices=['region', 'basic'], + help="""Specify the store model used by the analyzer. + 'region' specifies a field- sensitive store model. + 'basic' which is far less precise but can more quickly + analyze code. 'basic' was the default store model for + checker-0.221 and earlier.""") + advanced.add_argument( + '--constraints', + metavar='<model>', + dest='constraints_model', + default='range', + choices=['range', 'basic'], + help="""Specify the contraint engine used by the analyzer. Specifying + 'basic' uses a simpler, less powerful constraint model used by + checker-0.160 and earlier.""") + advanced.add_argument( + '--use-analyzer', + metavar='<path>', + dest='clang', + default='clang', + help="""'%(prog)s' uses the 'clang' executable relative to itself for + static analysis. One can override this behavior with this + option by using the 'clang' packaged with Xcode (on OS X) or + from the PATH.""") + advanced.add_argument( + '--analyzer-config', + metavar='<options>', + help="""Provide options to pass through to the analyzer's + -analyzer-config flag. Several options are separated with + comma: 'key1=val1,key2=val2' + + Available options: + stable-report-filename=true or false (default) + + Switch the page naming to: + report-<filename>-<function/method name>-<id>.html + instead of report-XXXXXX.html""") + advanced.add_argument( + '--exclude', + metavar='<directory>', + dest='excludes', + action='append', + default=[], + help="""Do not run static analyzer against files found in this + directory. 
(You can specify this option multiple times.) + Could be usefull when project contains 3rd party libraries. + The directory path shall be absolute path as file names in + the compilation database.""") + + plugins = parser.add_argument_group('checker options') + plugins.add_argument( + '--load-plugin', + metavar='<plugin library>', + dest='plugins', + action='append', + help="""Loading external checkers using the clang plugin interface.""") + plugins.add_argument('--enable-checker', + metavar='<checker name>', + action='append', + help="""Enable specific checker.""") + plugins.add_argument('--disable-checker', + metavar='<checker name>', + action='append', + help="""Disable specific checker.""") + plugins.add_argument( + '--help-checkers', + action='store_true', + help="""A default group of checkers is run unless explicitly disabled. + Exactly which checkers constitute the default group is a + function of the operating system in use. These can be printed + with this flag.""") + plugins.add_argument( + '--help-checkers-verbose', + action='store_true', + help="""Print all available checkers and mark the enabled ones.""") Index: tools/scan-build-py/libscanbuild/report.py =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/report.py @@ -0,0 +1,499 @@ +# -*- coding: utf-8 -*- +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +""" This module is responsible to generate the "cover" report. + +The input for this step is the output directory, where individual reports +could be found. It parses those reports and generates a final HTML "cover" +report. 
""" + +import logging +import re +import os +import os.path +import sys +import json +import shutil +import glob +import pkg_resources +import plistlib +import itertools +from libscanbuild import duplicate_check +from libscanbuild.clang import get_version + +__all__ = ['document'] + + +def document(args, output_dir): + """ Generates cover report and returns the number of bugs/crashes. """ + + html_reports_available = args.output_format in {'html', 'plist-html'} + # count crashes and bugs + crash_count = sum(1 for _ in read_crashes(output_dir)) + bug_counter = create_counters() + for bug in read_bugs(output_dir, html_reports_available): + bug_counter(bug) + result = crash_count + bug_counter.total + # generate cover file when it's needed + if html_reports_available and result: + # generate common prefix for source files to have sort filenames + with open(args.cdb, 'r') as handle: + prefix = commonprefix(item['file'] for item in json.load(handle)) + # assemble the cover from multiple fragments + try: + fragments = [] + if bug_counter.total: + fragments.append(bug_summary(output_dir, bug_counter)) + fragments.append(bug_report(output_dir, prefix)) + if crash_count: + fragments.append(crash_report(output_dir, prefix)) + + assemble_cover(output_dir, prefix, args, fragments) + finally: + for fragment in fragments: + os.remove(fragment) + # copy additinal files to the report + copy_resource_files(output_dir) + shutil.copy(args.cdb, output_dir) + return result + + +def assemble_cover(output_dir, prefix, args, fragments): + """ Put together the fragments into a final report. 
""" + + import getpass + import socket + import datetime + + if args.html_title is None: + args.html_title = os.path.basename(prefix) + ' - analyzer results' + + with open(os.path.join(output_dir, 'index.html'), 'w') as handle: + indent = 0 + handle.write(reindent(""" + |<!DOCTYPE html> + |<html> + | <head> + | <title>{html_title} + | + | + | + | """, indent).format(html_title=args.html_title)) + handle.write(comment('SUMMARYENDHEAD')) + handle.write(reindent( + """ + | + |

{html_title}

+ | + | + | + | + | + | + |
User:{user_name}@{host_name}
Working Directory:{current_dir}
Command Line:{cmd_args}
Clang Version:{clang_version}
Date:{date}
""", + indent).format(html_title=args.html_title, + user_name=getpass.getuser(), + host_name=socket.gethostname(), + current_dir=prefix, + cmd_args=' '.join(sys.argv), + clang_version=get_version(args.clang), + date=datetime.datetime.today().strftime('%c'))) + for fragment in fragments: + # copy the content of fragments + with open(fragment, 'r') as input_handle: + for line in input_handle: + handle.write(line) + handle.write(reindent(""" + | + |""", indent)) + + +def bug_summary(output_dir, bug_counter): + """ Bug summary is a HTML table to give a better overview of the bugs. """ + + name = os.path.join(output_dir, 'summary.html.fragment') + with open(name, 'w') as handle: + indent = 4 + handle.write(reindent(""" + |

Bug Summary

+ | + | + | + | + | + | + | + | + | """, indent)) + handle.write(reindent(""" + | + | + | + | + | """, indent).format(bug_counter.total)) + for category, types in bug_counter.categories.items(): + handle.write(reindent(""" + | + | + | """, indent).format(category)) + for bug_type in types.values(): + handle.write(reindent(""" + | + | + | + | + | """, indent).format(**bug_type)) + handle.write(reindent(""" + | + |
Bug TypeQuantityDisplay?
All Bugs{0} + |
+ | + |
+ |
{0}
{bug_type}{bug_count} + |
+ | + |
+ |
""", indent)) + handle.write(comment('SUMMARYBUGEND')) + return name + + +def bug_report(output_dir, prefix): + """ Creates a fragment from the analyzer reports. """ + + pretty = prettify_bug(prefix, output_dir) + bugs = (pretty(bug) for bug in read_bugs(output_dir, True)) + + name = os.path.join(output_dir, 'bugs.html.fragment') + with open(name, 'w') as handle: + indent = 4 + handle.write(reindent(""" + |

Reports

+ | + | + | + | + | + | + | + | + | + | + | + | + | """, indent)) + handle.write(comment('REPORTBUGCOL')) + for current in bugs: + handle.write(reindent(""" + | + | + | + | + | + | + | + | + | """, indent).format(**current)) + handle.write(comment('REPORTBUG', {'id': current['report_file']})) + handle.write(reindent(""" + | + |
Bug Group + | Bug Type + |  ▾ + | FileFunction/MethodLinePath Length
{bug_category}{bug_type}{bug_file}{bug_function}{bug_line}{bug_path_length}View Report
""", indent)) + handle.write(comment('REPORTBUGEND')) + return name + + +def crash_report(output_dir, prefix): + """ Creates a fragment from the compiler crashes. """ + + pretty = prettify_crash(prefix, output_dir) + crashes = (pretty(crash) for crash in read_crashes(output_dir)) + + name = os.path.join(output_dir, 'crashes.html.fragment') + with open(name, 'w') as handle: + indent = 4 + handle.write(reindent(""" + |

Analyzer Failures

+ |

The analyzer had problems processing the following files:

+ | + | + | + | + | + | + | + | + | + | """, indent)) + for current in crashes: + handle.write(reindent(""" + | + | + | + | + | + | """, indent).format(**current)) + handle.write(comment('REPORTPROBLEM', current)) + handle.write(reindent(""" + | + |
ProblemSource FilePreprocessed FileSTDERR Output
{problem}{source}preprocessor outputanalyzer std err
""", indent)) + handle.write(comment('REPORTCRASHES')) + return name + + +def read_crashes(output_dir): + """ Generate a unique sequence of crashes from given output directory. """ + + return (parse_crash(filename) for filename in + glob.iglob(os.path.join(output_dir, 'failures', '*.info.txt'))) + + +def read_bugs(output_dir, html): + """ Generate a unique sequence of bugs from given output directory. + + Duplicates can be in a project if the same module was compiled multiple + times with different compiler options. These would be better to show in + the final report (cover) only once. """ + + parser = parse_bug_html if html else parse_bug_plist + pattern = '*.html' if html else '*.plist' + + duplicate = duplicate_check( + lambda bug: '{bug_line}.{bug_path_length}:{bug_file}'.format(**bug)) + + bugs = itertools.chain.from_iterable( + # parser creates a bug generator not the bug itself + parser(filename) for filename + in glob.iglob(os.path.join(output_dir, pattern))) + + return (bug for bug in bugs if not duplicate(bug)) + + +def parse_bug_plist(filename): + """ Returns the generator of bugs from a single .plist file. """ + + content = plistlib.readPlist(filename) + files = content.get('files') + for bug in content.get('diagnostics', []): + if len(files) <= int(bug['location']['file']): + logging.warning('Parsing bug from "%s" failed', filename) + continue + + yield { + 'result': filename, + 'bug_type': bug['type'], + 'bug_category': bug['category'], + 'bug_line': int(bug['location']['line']), + 'bug_bug_path_length': int(bug['location']['col']), + 'bug_file': files[int(bug['location']['file'])] + } + + +def parse_bug_html(filename): + """ Parse out the bug information from HTML output. 
""" + + patterns = [re.compile(r'$'), + re.compile(r'$'), + re.compile(r'$'), + re.compile(r'$'), + re.compile(r'$'), + re.compile(r'$'), + re.compile(r'$')] + endsign = re.compile(r'') + + bug = { + 'report_file': filename, + 'bug_function': 'n/a', # compatibility with < clang-3.5 + 'bug_category': 'Other', + 'bug_line': 0, + 'bug_path_length': 1 + } + + with open(filename) as handler: + for line in handler.readlines(): + # do not read the file further + if endsign.match(line): + break + # search for the right lines + for regex in patterns: + match = regex.match(line.strip()) + if match: + bug.update(match.groupdict()) + break + + encode_value(bug, 'bug_line', int) + encode_value(bug, 'bug_path_length', int) + + yield bug + + +def parse_crash(filename): + """ Parse out the crash information from the report file. """ + + match = re.match(r'(.*)\.info\.txt', filename) + name = match.group(1) if match else None + with open(filename) as handler: + lines = handler.readlines() + return { + 'source': lines[0].rstrip(), + 'problem': lines[1].rstrip(), + 'file': name, + 'info': name + '.info.txt', + 'stderr': name + '.stderr.txt' + } + + +def category_type_name(bug): + """ Create a new bug attribute from bug by category and type. + + The result will be used as CSS class selector in the final report. """ + + def smash(key): + """ Make value ready to be HTML attribute value. """ + + return bug.get(key, '').lower().replace(' ', '_').replace("'", '') + + return escape('bt_' + smash('bug_category') + '_' + smash('bug_type')) + + +def create_counters(): + """ Create counters for bug statistics. + + Two entries are maintained: 'total' is an integer, represents the + number of bugs. The 'categories' is a two level categorisation of bug + counters. The first level is 'bug category' the second is 'bug type'. + Each entry in this classification is a dictionary of 'count', 'type' + and 'label'. 
""" + + def predicate(bug): + bug_category = bug['bug_category'] + bug_type = bug['bug_type'] + current_category = predicate.categories.get(bug_category, dict()) + current_type = current_category.get(bug_type, { + 'bug_type': bug_type, + 'bug_type_class': category_type_name(bug), + 'bug_count': 0 + }) + current_type.update({'bug_count': current_type['bug_count'] + 1}) + current_category.update({bug_type: current_type}) + predicate.categories.update({bug_category: current_category}) + predicate.total += 1 + + predicate.total = 0 + predicate.categories = dict() + return predicate + + +def prettify_bug(prefix, output_dir): + def predicate(bug): + """ Make safe this values to embed into HTML. """ + + bug['bug_type_class'] = category_type_name(bug) + + encode_value(bug, 'bug_file', lambda x: escape(chop(prefix, x))) + encode_value(bug, 'bug_category', escape) + encode_value(bug, 'bug_type', escape) + encode_value(bug, 'report_file', lambda x: escape(chop(output_dir, x))) + return bug + + return predicate + + +def prettify_crash(prefix, output_dir): + def predicate(crash): + """ Make safe this values to embed into HTML. """ + + encode_value(crash, 'source', lambda x: escape(chop(prefix, x))) + encode_value(crash, 'problem', escape) + encode_value(crash, 'file', lambda x: escape(chop(output_dir, x))) + encode_value(crash, 'info', lambda x: escape(chop(output_dir, x))) + encode_value(crash, 'stderr', lambda x: escape(chop(output_dir, x))) + return crash + + return predicate + + +def copy_resource_files(output_dir): + """ Copy the javascript and css files to the report directory. """ + + this_package = 'libscanbuild' + resources_dir = pkg_resources.resource_filename(this_package, 'resources') + for resource in pkg_resources.resource_listdir(this_package, 'resources'): + shutil.copy(os.path.join(resources_dir, resource), output_dir) + + +def encode_value(container, key, encode): + """ Run 'encode' on 'container[key]' value and update it. 
""" + + if key in container: + value = encode(container[key]) + container.update({key: value}) + + +def chop(prefix, filename): + """ Create 'filename' from '/prefix/filename' """ + + if not len(prefix): + return filename + if prefix[-1] != os.path.sep: + prefix += os.path.sep + split = filename.split(prefix, 1) + return split[1] if len(split) == 2 else split[0] + + +def escape(text): + """ Paranoid HTML escape method. (Python version independent) """ + + escape_table = { + '&': '&', + '"': '"', + "'": ''', + '>': '>', + '<': '<' + } + return ''.join(escape_table.get(c, c) for c in text) + + +def reindent(text, indent): + """ Utility function to format html output and keep indentation. """ + + result = '' + for line in text.splitlines(): + if len(line.strip()): + result += ' ' * indent + line.split('|')[1] + os.linesep + return result + + +def comment(name, opts=dict()): + """ Utility function to format meta information as comment. """ + + attributes = '' + for key, value in opts.items(): + attributes += ' {0}="{1}"'.format(key, value) + + return '{2}'.format(name, attributes, os.linesep) + + +def commonprefix(files): + """ Fixed version of os.path.commonprefix. Return the longest path prefix + that is a prefix of all paths in filenames. 
""" + + result = None + for current in files: + if result is not None: + result = os.path.commonprefix([result, current]) + else: + result = current + + if result is None: + return '' + elif not os.path.isdir(result): + return os.path.dirname(result) + else: + return os.path.abspath(result) Index: tools/scan-build-py/libscanbuild/resources/scanview.css =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/resources/scanview.css @@ -0,0 +1,62 @@ +body { color:#000000; background-color:#ffffff } +body { font-family: Helvetica, sans-serif; font-size:9pt } +h1 { font-size: 14pt; } +h2 { font-size: 12pt; } +table { font-size:9pt } +table { border-spacing: 0px; border: 1px solid black } +th, table thead { + background-color:#eee; color:#666666; + font-weight: bold; cursor: default; + text-align:center; + font-weight: bold; font-family: Verdana; + white-space:nowrap; +} +.W { font-size:0px } +th, td { padding:5px; padding-left:8px; text-align:left } +td.SUMM_DESC { padding-left:12px } +td.DESC { white-space:pre } +td.Q { text-align:right } +td { text-align:left } +tbody.scrollContent { overflow:auto } + +table.form_group { + background-color: #ccc; + border: 1px solid #333; + padding: 2px; +} + +table.form_inner_group { + background-color: #ccc; + border: 1px solid #333; + padding: 0px; +} + +table.form { + background-color: #999; + border: 1px solid #333; + padding: 2px; +} + +td.form_label { + text-align: right; + vertical-align: top; +} +/* For one line entires */ +td.form_clabel { + text-align: right; + vertical-align: center; +} +td.form_value { + text-align: left; + vertical-align: top; +} +td.form_submit { + text-align: right; + vertical-align: top; +} + +h1.SubmitFail { + color: #f00; +} +h1.SubmitOk { +} Index: tools/scan-build-py/libscanbuild/resources/selectable.js =================================================================== --- /dev/null +++ 
tools/scan-build-py/libscanbuild/resources/selectable.js @@ -0,0 +1,47 @@ +function SetDisplay(RowClass, DisplayVal) +{ + var Rows = document.getElementsByTagName("tr"); + for ( var i = 0 ; i < Rows.length; ++i ) { + if (Rows[i].className == RowClass) { + Rows[i].style.display = DisplayVal; + } + } +} + +function CopyCheckedStateToCheckButtons(SummaryCheckButton) { + var Inputs = document.getElementsByTagName("input"); + for ( var i = 0 ; i < Inputs.length; ++i ) { + if (Inputs[i].type == "checkbox") { + if(Inputs[i] != SummaryCheckButton) { + Inputs[i].checked = SummaryCheckButton.checked; + Inputs[i].onclick(); + } + } + } +} + +function returnObjById( id ) { + if (document.getElementById) + var returnVar = document.getElementById(id); + else if (document.all) + var returnVar = document.all[id]; + else if (document.layers) + var returnVar = document.layers[id]; + return returnVar; +} + +var NumUnchecked = 0; + +function ToggleDisplay(CheckButton, ClassName) { + if (CheckButton.checked) { + SetDisplay(ClassName, ""); + if (--NumUnchecked == 0) { + returnObjById("AllBugsCheck").checked = true; + } + } + else { + SetDisplay(ClassName, "none"); + NumUnchecked++; + returnObjById("AllBugsCheck").checked = false; + } +} Index: tools/scan-build-py/libscanbuild/resources/sorttable.js =================================================================== --- /dev/null +++ tools/scan-build-py/libscanbuild/resources/sorttable.js @@ -0,0 +1,492 @@ +/* + SortTable + version 2 + 7th April 2007 + Stuart Langridge, http://www.kryogenix.org/code/browser/sorttable/ + + Instructions: + Download this file + Add to your HTML + Add class="sortable" to any table you'd like to make sortable + Click on the headers to sort + + Thanks to many, many people for contributions and suggestions. + Licenced as X11: http://www.kryogenix.org/code/browser/licence.html + This basically means: do what you want with it. 
+*/ + + +var stIsIE = /*@cc_on!@*/false; + +sorttable = { + init: function() { + // quit if this function has already been called + if (arguments.callee.done) return; + // flag this function so we don't do the same thing twice + arguments.callee.done = true; + // kill the timer + if (_timer) clearInterval(_timer); + + if (!document.createElement || !document.getElementsByTagName) return; + + sorttable.DATE_RE = /^(\d\d?)[\/\.-](\d\d?)[\/\.-]((\d\d)?\d\d)$/; + + forEach(document.getElementsByTagName('table'), function(table) { + if (table.className.search(/\bsortable\b/) != -1) { + sorttable.makeSortable(table); + } + }); + + }, + + makeSortable: function(table) { + if (table.getElementsByTagName('thead').length == 0) { + // table doesn't have a tHead. Since it should have, create one and + // put the first table row in it. + the = document.createElement('thead'); + the.appendChild(table.rows[0]); + table.insertBefore(the,table.firstChild); + } + // Safari doesn't support table.tHead, sigh + if (table.tHead == null) table.tHead = table.getElementsByTagName('thead')[0]; + + if (table.tHead.rows.length != 1) return; // can't cope with two header rows + + // Sorttable v1 put rows with a class of "sortbottom" at the bottom (as + // "total" rows, for example). This is B&R, since what you're supposed + // to do is put them in a tfoot. So, if there are sortbottom rows, + // for backward compatibility, move them to tfoot (creating it if needed). 
+ sortbottomrows = []; + for (var i=0; i5' : ' ▴'; + this.appendChild(sortrevind); + return; + } + if (this.className.search(/\bsorttable_sorted_reverse\b/) != -1) { + // if we're already sorted by this column in reverse, just + // re-reverse the table, which is quicker + sorttable.reverse(this.sorttable_tbody); + this.className = this.className.replace('sorttable_sorted_reverse', + 'sorttable_sorted'); + this.removeChild(document.getElementById('sorttable_sortrevind')); + sortfwdind = document.createElement('span'); + sortfwdind.id = "sorttable_sortfwdind"; + sortfwdind.innerHTML = stIsIE ? ' 6' : ' ▾'; + this.appendChild(sortfwdind); + return; + } + + // remove sorttable_sorted classes + theadrow = this.parentNode; + forEach(theadrow.childNodes, function(cell) { + if (cell.nodeType == 1) { // an element + cell.className = cell.className.replace('sorttable_sorted_reverse',''); + cell.className = cell.className.replace('sorttable_sorted',''); + } + }); + sortfwdind = document.getElementById('sorttable_sortfwdind'); + if (sortfwdind) { sortfwdind.parentNode.removeChild(sortfwdind); } + sortrevind = document.getElementById('sorttable_sortrevind'); + if (sortrevind) { sortrevind.parentNode.removeChild(sortrevind); } + + this.className += ' sorttable_sorted'; + sortfwdind = document.createElement('span'); + sortfwdind.id = "sorttable_sortfwdind"; + sortfwdind.innerHTML = stIsIE ? ' 6' : ' ▾'; + this.appendChild(sortfwdind); + + // build an array to sort. 
This is a Schwartzian transform thing, + // i.e., we "decorate" each row with the actual sort key, + // sort based on the sort keys, and then put the rows back in order + // which is a lot faster because you only do getInnerText once per row + row_array = []; + col = this.sorttable_columnindex; + rows = this.sorttable_tbody.rows; + for (var j=0; j 12) { + // definitely dd/mm + return sorttable.sort_ddmm; + } else if (second > 12) { + return sorttable.sort_mmdd; + } else { + // looks like a date, but we can't tell which, so assume + // that it's dd/mm (English imperialism!) and keep looking + sortfn = sorttable.sort_ddmm; + } + } + } + } + return sortfn; + }, + + getInnerText: function(node) { + // gets the text we want to use for sorting for a cell. + // strips leading and trailing whitespace. + // this is *not* a generic getInnerText function; it's special to sorttable. + // for example, you can override the cell text with a customkey attribute. + // it also gets .value for fields. + + hasInputs = (typeof node.getElementsByTagName == 'function') && + node.getElementsByTagName('input').length; + + if (node.getAttribute("sorttable_customkey") != null) { + return node.getAttribute("sorttable_customkey"); + } + else if (typeof node.textContent != 'undefined' && !hasInputs) { + return node.textContent.replace(/^\s+|\s+$/g, ''); + } + else if (typeof node.innerText != 'undefined' && !hasInputs) { + return node.innerText.replace(/^\s+|\s+$/g, ''); + } + else if (typeof node.text != 'undefined' && !hasInputs) { + return node.text.replace(/^\s+|\s+$/g, ''); + } + else { + switch (node.nodeType) { + case 3: + if (node.nodeName.toLowerCase() == 'input') { + return node.value.replace(/^\s+|\s+$/g, ''); + } + case 4: + return node.nodeValue.replace(/^\s+|\s+$/g, ''); + break; + case 1: + case 11: + var innerText = ''; + for (var i = 0; i < node.childNodes.length; i++) { + innerText += sorttable.getInnerText(node.childNodes[i]); + } + return innerText.replace(/^\s+|\s+$/g, ''); 
+ break; + default: + return ''; + } + } + }, + + reverse: function(tbody) { + // reverse the rows in a tbody + newrows = []; + for (var i=0; i=0; i--) { + tbody.appendChild(newrows[i]); + } + delete newrows; + }, + + /* sort functions + each sort function takes two parameters, a and b + you are comparing a[0] and b[0] */ + sort_numeric: function(a,b) { + aa = parseFloat(a[0].replace(/[^0-9.-]/g,'')); + if (isNaN(aa)) aa = 0; + bb = parseFloat(b[0].replace(/[^0-9.-]/g,'')); + if (isNaN(bb)) bb = 0; + return aa-bb; + }, + sort_alpha: function(a,b) { + if (a[0]==b[0]) return 0; + if (a[0] 0 ) { + var q = list[i]; list[i] = list[i+1]; list[i+1] = q; + swap = true; + } + } // for + t--; + + if (!swap) break; + + for(var i = t; i > b; --i) { + if ( comp_func(list[i], list[i-1]) < 0 ) { + var q = list[i]; list[i] = list[i-1]; list[i-1] = q; + swap = true; + } + } // for + b++; + + } // while(swap) + } +} + +/* ****************************************************************** + Supporting functions: bundled here to avoid depending on a library + ****************************************************************** */ + +// Dean Edwards/Matthias Miller/John Resig + +/* for Mozilla/Opera9 */ +if (document.addEventListener) { + document.addEventListener("DOMContentLoaded", sorttable.init, false); +} + +/* for Internet Explorer */ +/*@cc_on @*/ +/*@if (@_win32) + document.write(" - | - | """, indent).format(html_title=args.html_title)) - handle.write(comment('SUMMARYENDHEAD')) - handle.write(reindent( - """ - | - |

{html_title}

- | - | - | - | - | - | - |
User:{user_name}@{host_name}
Working Directory:{current_dir}
Command Line:{cmd_args}
Clang Version:{clang_version}
Date:{date}
""", - indent).format(html_title=args.html_title, - user_name=getpass.getuser(), - host_name=socket.gethostname(), - current_dir=prefix, - cmd_args=' '.join(sys.argv), - clang_version=get_version(args.clang), - date=datetime.datetime.today().strftime('%c'))) - for fragment in fragments: - # copy the content of fragments - with open(fragment, 'r') as input_handle: - for line in input_handle: - handle.write(line) - handle.write(reindent(""" - | - |""", indent)) - - -def bug_summary(output_dir, bug_counter): - """ Bug summary is a HTML table to give a better overview of the bugs. """ - - name = os.path.join(output_dir, 'summary.html.fragment') - with open(name, 'w') as handle: - indent = 4 - handle.write(reindent(""" - |

Bug Summary

- | - | - | - | - | - | - | - | - | """, indent)) - handle.write(reindent(""" - | - | - | - | - | """, indent).format(bug_counter.total)) - for category, types in bug_counter.categories.items(): - handle.write(reindent(""" - | - | - | """, indent).format(category)) - for bug_type in types.values(): - handle.write(reindent(""" - | - | - | - | - | """, indent).format(**bug_type)) - handle.write(reindent(""" - | - |
Bug TypeQuantityDisplay?
All Bugs{0} - |
- | - |
- |
{0}
{bug_type}{bug_count} - |
- | - |
- |
""", indent)) - handle.write(comment('SUMMARYBUGEND')) - return name - - -def bug_report(output_dir, prefix): - """ Creates a fragment from the analyzer reports. """ - - pretty = prettify_bug(prefix, output_dir) - bugs = (pretty(bug) for bug in read_bugs(output_dir, True)) - - name = os.path.join(output_dir, 'bugs.html.fragment') - with open(name, 'w') as handle: - indent = 4 - handle.write(reindent(""" - |

Reports

- | - | - | - | - | - | - | - | - | - | - | - | - | """, indent)) - handle.write(comment('REPORTBUGCOL')) - for current in bugs: - handle.write(reindent(""" - | - | - | - | - | - | - | - | - | """, indent).format(**current)) - handle.write(comment('REPORTBUG', {'id': current['report_file']})) - handle.write(reindent(""" - | - |
Bug Group - | Bug Type - |  ▾ - | FileFunction/MethodLinePath Length
{bug_category}{bug_type}{bug_file}{bug_function}{bug_line}{bug_path_length}View Report
""", indent)) - handle.write(comment('REPORTBUGEND')) - return name - - -def crash_report(output_dir, prefix): - """ Creates a fragment from the compiler crashes. """ - - pretty = prettify_crash(prefix, output_dir) - crashes = (pretty(crash) for crash in read_crashes(output_dir)) - - name = os.path.join(output_dir, 'crashes.html.fragment') - with open(name, 'w') as handle: - indent = 4 - handle.write(reindent(""" - |

Analyzer Failures

- |

The analyzer had problems processing the following files:

- | - | - | - | - | - | - | - | - | - | """, indent)) - for current in crashes: - handle.write(reindent(""" - | - | - | - | - | - | """, indent).format(**current)) - handle.write(comment('REPORTPROBLEM', current)) - handle.write(reindent(""" - | - |
ProblemSource FilePreprocessed FileSTDERR Output
{problem}{source}preprocessor outputanalyzer std err
""", indent)) - handle.write(comment('REPORTCRASHES')) - return name - - -def read_crashes(output_dir): - """ Generate a unique sequence of crashes from given output directory. """ - - return (parse_crash(filename) for filename in - glob.iglob(os.path.join(output_dir, 'failures', '*.info.txt'))) - - -def read_bugs(output_dir, html): - """ Generate a unique sequence of bugs from given output directory. - - Duplicates can be in a project if the same module was compiled multiple - times with different compiler options. These would be better to show in - the final report (cover) only once. """ - - parser = parse_bug_html if html else parse_bug_plist - pattern = '*.html' if html else '*.plist' - - duplicate = duplicate_check( - lambda bug: '{bug_line}.{bug_path_length}:{bug_file}'.format(**bug)) - - bugs = itertools.chain.from_iterable( - # parser creates a bug generator not the bug itself - parser(filename) for filename - in glob.iglob(os.path.join(output_dir, pattern))) - - return (bug for bug in bugs if not duplicate(bug)) - - -def parse_bug_plist(filename): - """ Returns the generator of bugs from a single .plist file. """ - - content = plistlib.readPlist(filename) - files = content.get('files') - for bug in content.get('diagnostics', []): - if len(files) <= int(bug['location']['file']): - logging.warning('Parsing bug from "%s" failed', filename) - continue - - yield { - 'result': filename, - 'bug_type': bug['type'], - 'bug_category': bug['category'], - 'bug_line': int(bug['location']['line']), - 'bug_bug_path_length': int(bug['location']['col']), - 'bug_file': files[int(bug['location']['file'])] - } - - -def parse_bug_html(filename): - """ Parse out the bug information from HTML output. 
""" - - patterns = [re.compile(r'$'), - re.compile(r'$'), - re.compile(r'$'), - re.compile(r'$'), - re.compile(r'$'), - re.compile(r'$'), - re.compile(r'$')] - endsign = re.compile(r'') - - bug = { - 'report_file': filename, - 'bug_function': 'n/a', # compatibility with < clang-3.5 - 'bug_category': 'Other', - 'bug_line': 0, - 'bug_path_length': 1 - } - - with open(filename) as handler: - for line in handler.readlines(): - # do not read the file further - if endsign.match(line): - break - # search for the right lines - for regex in patterns: - match = regex.match(line.strip()) - if match: - bug.update(match.groupdict()) - break - - encode_value(bug, 'bug_line', int) - encode_value(bug, 'bug_path_length', int) - - yield bug - - -def parse_crash(filename): - """ Parse out the crash information from the report file. """ - - match = re.match(r'(.*)\.info\.txt', filename) - name = match.group(1) if match else None - with open(filename) as handler: - lines = handler.readlines() - return { - 'source': lines[0].rstrip(), - 'problem': lines[1].rstrip(), - 'file': name, - 'info': name + '.info.txt', - 'stderr': name + '.stderr.txt' - } - - -def category_type_name(bug): - """ Create a new bug attribute from bug by category and type. - - The result will be used as CSS class selector in the final report. """ - - def smash(key): - """ Make value ready to be HTML attribute value. """ - - return bug.get(key, '').lower().replace(' ', '_').replace("'", '') - - return escape('bt_' + smash('bug_category') + '_' + smash('bug_type')) - - -def create_counters(): - """ Create counters for bug statistics. - - Two entries are maintained: 'total' is an integer, represents the - number of bugs. The 'categories' is a two level categorisation of bug - counters. The first level is 'bug category' the second is 'bug type'. - Each entry in this classification is a dictionary of 'count', 'type' - and 'label'. 
""" - - def predicate(bug): - bug_category = bug['bug_category'] - bug_type = bug['bug_type'] - current_category = predicate.categories.get(bug_category, dict()) - current_type = current_category.get(bug_type, { - 'bug_type': bug_type, - 'bug_type_class': category_type_name(bug), - 'bug_count': 0 - }) - current_type.update({'bug_count': current_type['bug_count'] + 1}) - current_category.update({bug_type: current_type}) - predicate.categories.update({bug_category: current_category}) - predicate.total += 1 - - predicate.total = 0 - predicate.categories = dict() - return predicate - - -def prettify_bug(prefix, output_dir): - def predicate(bug): - """ Make safe this values to embed into HTML. """ - - bug['bug_type_class'] = category_type_name(bug) - - encode_value(bug, 'bug_file', lambda x: escape(chop(prefix, x))) - encode_value(bug, 'bug_category', escape) - encode_value(bug, 'bug_type', escape) - encode_value(bug, 'report_file', lambda x: escape(chop(output_dir, x))) - return bug - - return predicate - - -def prettify_crash(prefix, output_dir): - def predicate(crash): - """ Make safe this values to embed into HTML. """ - - encode_value(crash, 'source', lambda x: escape(chop(prefix, x))) - encode_value(crash, 'problem', escape) - encode_value(crash, 'file', lambda x: escape(chop(output_dir, x))) - encode_value(crash, 'info', lambda x: escape(chop(output_dir, x))) - encode_value(crash, 'stderr', lambda x: escape(chop(output_dir, x))) - return crash - - return predicate - - -def copy_resource_files(output_dir): - """ Copy the javascript and css files to the report directory. """ - - this_package = 'libscanbuild' - resources_dir = pkg_resources.resource_filename(this_package, 'resources') - for resource in pkg_resources.resource_listdir(this_package, 'resources'): - shutil.copy(os.path.join(resources_dir, resource), output_dir) - - -def encode_value(container, key, encode): - """ Run 'encode' on 'container[key]' value and update it. 
""" - - if key in container: - value = encode(container[key]) - container.update({key: value}) - - -def chop(prefix, filename): - """ Create 'filename' from '/prefix/filename' """ - - if not len(prefix): - return filename - if prefix[-1] != os.path.sep: - prefix += os.path.sep - split = filename.split(prefix, 1) - return split[1] if len(split) == 2 else split[0] - - -def escape(text): - """ Paranoid HTML escape method. (Python version independent) """ - - escape_table = { - '&': '&', - '"': '"', - "'": ''', - '>': '>', - '<': '<' - } - return ''.join(escape_table.get(c, c) for c in text) - - -def reindent(text, indent): - """ Utility function to format html output and keep indentation. """ - - result = '' - for line in text.splitlines(): - if len(line.strip()): - result += ' ' * indent + line.split('|')[1] + os.linesep - return result - - -def comment(name, opts=dict()): - """ Utility function to format meta information as comment. """ - - attributes = '' - for key, value in opts.items(): - attributes += ' {0}="{1}"'.format(key, value) - - return '{2}'.format(name, attributes, os.linesep) - - -def commonprefix(files): - """ Fixed version of os.path.commonprefix. Return the longest path prefix - that is a prefix of all paths in filenames. 
""" - - result = None - for current in files: - if result is not None: - result = os.path.commonprefix([result, current]) - else: - result = current - - if result is None: - return '' - elif not os.path.isdir(result): - return os.path.dirname(result) - else: - return os.path.abspath(result) Index: tools/scan-build2/libscanbuild/resources/scanview.css =================================================================== --- tools/scan-build2/libscanbuild/resources/scanview.css +++ /dev/null @@ -1,62 +0,0 @@ -body { color:#000000; background-color:#ffffff } -body { font-family: Helvetica, sans-serif; font-size:9pt } -h1 { font-size: 14pt; } -h2 { font-size: 12pt; } -table { font-size:9pt } -table { border-spacing: 0px; border: 1px solid black } -th, table thead { - background-color:#eee; color:#666666; - font-weight: bold; cursor: default; - text-align:center; - font-weight: bold; font-family: Verdana; - white-space:nowrap; -} -.W { font-size:0px } -th, td { padding:5px; padding-left:8px; text-align:left } -td.SUMM_DESC { padding-left:12px } -td.DESC { white-space:pre } -td.Q { text-align:right } -td { text-align:left } -tbody.scrollContent { overflow:auto } - -table.form_group { - background-color: #ccc; - border: 1px solid #333; - padding: 2px; -} - -table.form_inner_group { - background-color: #ccc; - border: 1px solid #333; - padding: 0px; -} - -table.form { - background-color: #999; - border: 1px solid #333; - padding: 2px; -} - -td.form_label { - text-align: right; - vertical-align: top; -} -/* For one line entires */ -td.form_clabel { - text-align: right; - vertical-align: center; -} -td.form_value { - text-align: left; - vertical-align: top; -} -td.form_submit { - text-align: right; - vertical-align: top; -} - -h1.SubmitFail { - color: #f00; -} -h1.SubmitOk { -} Index: tools/scan-build2/libscanbuild/resources/selectable.js =================================================================== --- tools/scan-build2/libscanbuild/resources/selectable.js +++ /dev/null 
@@ -1,47 +0,0 @@ -function SetDisplay(RowClass, DisplayVal) -{ - var Rows = document.getElementsByTagName("tr"); - for ( var i = 0 ; i < Rows.length; ++i ) { - if (Rows[i].className == RowClass) { - Rows[i].style.display = DisplayVal; - } - } -} - -function CopyCheckedStateToCheckButtons(SummaryCheckButton) { - var Inputs = document.getElementsByTagName("input"); - for ( var i = 0 ; i < Inputs.length; ++i ) { - if (Inputs[i].type == "checkbox") { - if(Inputs[i] != SummaryCheckButton) { - Inputs[i].checked = SummaryCheckButton.checked; - Inputs[i].onclick(); - } - } - } -} - -function returnObjById( id ) { - if (document.getElementById) - var returnVar = document.getElementById(id); - else if (document.all) - var returnVar = document.all[id]; - else if (document.layers) - var returnVar = document.layers[id]; - return returnVar; -} - -var NumUnchecked = 0; - -function ToggleDisplay(CheckButton, ClassName) { - if (CheckButton.checked) { - SetDisplay(ClassName, ""); - if (--NumUnchecked == 0) { - returnObjById("AllBugsCheck").checked = true; - } - } - else { - SetDisplay(ClassName, "none"); - NumUnchecked++; - returnObjById("AllBugsCheck").checked = false; - } -} Index: tools/scan-build2/libscanbuild/resources/sorttable.js =================================================================== --- tools/scan-build2/libscanbuild/resources/sorttable.js +++ /dev/null @@ -1,492 +0,0 @@ -/* - SortTable - version 2 - 7th April 2007 - Stuart Langridge, http://www.kryogenix.org/code/browser/sorttable/ - - Instructions: - Download this file - Add to your HTML - Add class="sortable" to any table you'd like to make sortable - Click on the headers to sort - - Thanks to many, many people for contributions and suggestions. - Licenced as X11: http://www.kryogenix.org/code/browser/licence.html - This basically means: do what you want with it. 
-*/ - - -var stIsIE = /*@cc_on!@*/false; - -sorttable = { - init: function() { - // quit if this function has already been called - if (arguments.callee.done) return; - // flag this function so we don't do the same thing twice - arguments.callee.done = true; - // kill the timer - if (_timer) clearInterval(_timer); - - if (!document.createElement || !document.getElementsByTagName) return; - - sorttable.DATE_RE = /^(\d\d?)[\/\.-](\d\d?)[\/\.-]((\d\d)?\d\d)$/; - - forEach(document.getElementsByTagName('table'), function(table) { - if (table.className.search(/\bsortable\b/) != -1) { - sorttable.makeSortable(table); - } - }); - - }, - - makeSortable: function(table) { - if (table.getElementsByTagName('thead').length == 0) { - // table doesn't have a tHead. Since it should have, create one and - // put the first table row in it. - the = document.createElement('thead'); - the.appendChild(table.rows[0]); - table.insertBefore(the,table.firstChild); - } - // Safari doesn't support table.tHead, sigh - if (table.tHead == null) table.tHead = table.getElementsByTagName('thead')[0]; - - if (table.tHead.rows.length != 1) return; // can't cope with two header rows - - // Sorttable v1 put rows with a class of "sortbottom" at the bottom (as - // "total" rows, for example). This is B&R, since what you're supposed - // to do is put them in a tfoot. So, if there are sortbottom rows, - // for backward compatibility, move them to tfoot (creating it if needed). 
- sortbottomrows = []; - for (var i=0; i5' : ' ▴'; - this.appendChild(sortrevind); - return; - } - if (this.className.search(/\bsorttable_sorted_reverse\b/) != -1) { - // if we're already sorted by this column in reverse, just - // re-reverse the table, which is quicker - sorttable.reverse(this.sorttable_tbody); - this.className = this.className.replace('sorttable_sorted_reverse', - 'sorttable_sorted'); - this.removeChild(document.getElementById('sorttable_sortrevind')); - sortfwdind = document.createElement('span'); - sortfwdind.id = "sorttable_sortfwdind"; - sortfwdind.innerHTML = stIsIE ? ' 6' : ' ▾'; - this.appendChild(sortfwdind); - return; - } - - // remove sorttable_sorted classes - theadrow = this.parentNode; - forEach(theadrow.childNodes, function(cell) { - if (cell.nodeType == 1) { // an element - cell.className = cell.className.replace('sorttable_sorted_reverse',''); - cell.className = cell.className.replace('sorttable_sorted',''); - } - }); - sortfwdind = document.getElementById('sorttable_sortfwdind'); - if (sortfwdind) { sortfwdind.parentNode.removeChild(sortfwdind); } - sortrevind = document.getElementById('sorttable_sortrevind'); - if (sortrevind) { sortrevind.parentNode.removeChild(sortrevind); } - - this.className += ' sorttable_sorted'; - sortfwdind = document.createElement('span'); - sortfwdind.id = "sorttable_sortfwdind"; - sortfwdind.innerHTML = stIsIE ? ' 6' : ' ▾'; - this.appendChild(sortfwdind); - - // build an array to sort. 
This is a Schwartzian transform thing, - // i.e., we "decorate" each row with the actual sort key, - // sort based on the sort keys, and then put the rows back in order - // which is a lot faster because you only do getInnerText once per row - row_array = []; - col = this.sorttable_columnindex; - rows = this.sorttable_tbody.rows; - for (var j=0; j 12) { - // definitely dd/mm - return sorttable.sort_ddmm; - } else if (second > 12) { - return sorttable.sort_mmdd; - } else { - // looks like a date, but we can't tell which, so assume - // that it's dd/mm (English imperialism!) and keep looking - sortfn = sorttable.sort_ddmm; - } - } - } - } - return sortfn; - }, - - getInnerText: function(node) { - // gets the text we want to use for sorting for a cell. - // strips leading and trailing whitespace. - // this is *not* a generic getInnerText function; it's special to sorttable. - // for example, you can override the cell text with a customkey attribute. - // it also gets .value for fields. - - hasInputs = (typeof node.getElementsByTagName == 'function') && - node.getElementsByTagName('input').length; - - if (node.getAttribute("sorttable_customkey") != null) { - return node.getAttribute("sorttable_customkey"); - } - else if (typeof node.textContent != 'undefined' && !hasInputs) { - return node.textContent.replace(/^\s+|\s+$/g, ''); - } - else if (typeof node.innerText != 'undefined' && !hasInputs) { - return node.innerText.replace(/^\s+|\s+$/g, ''); - } - else if (typeof node.text != 'undefined' && !hasInputs) { - return node.text.replace(/^\s+|\s+$/g, ''); - } - else { - switch (node.nodeType) { - case 3: - if (node.nodeName.toLowerCase() == 'input') { - return node.value.replace(/^\s+|\s+$/g, ''); - } - case 4: - return node.nodeValue.replace(/^\s+|\s+$/g, ''); - break; - case 1: - case 11: - var innerText = ''; - for (var i = 0; i < node.childNodes.length; i++) { - innerText += sorttable.getInnerText(node.childNodes[i]); - } - return innerText.replace(/^\s+|\s+$/g, ''); 
- break; - default: - return ''; - } - } - }, - - reverse: function(tbody) { - // reverse the rows in a tbody - newrows = []; - for (var i=0; i=0; i--) { - tbody.appendChild(newrows[i]); - } - delete newrows; - }, - - /* sort functions - each sort function takes two parameters, a and b - you are comparing a[0] and b[0] */ - sort_numeric: function(a,b) { - aa = parseFloat(a[0].replace(/[^0-9.-]/g,'')); - if (isNaN(aa)) aa = 0; - bb = parseFloat(b[0].replace(/[^0-9.-]/g,'')); - if (isNaN(bb)) bb = 0; - return aa-bb; - }, - sort_alpha: function(a,b) { - if (a[0]==b[0]) return 0; - if (a[0] 0 ) { - var q = list[i]; list[i] = list[i+1]; list[i+1] = q; - swap = true; - } - } // for - t--; - - if (!swap) break; - - for(var i = t; i > b; --i) { - if ( comp_func(list[i], list[i-1]) < 0 ) { - var q = list[i]; list[i] = list[i-1]; list[i-1] = q; - swap = true; - } - } // for - b++; - - } // while(swap) - } -} - -/* ****************************************************************** - Supporting functions: bundled here to avoid depending on a library - ****************************************************************** */ - -// Dean Edwards/Matthias Miller/John Resig - -/* for Mozilla/Opera9 */ -if (document.addEventListener) { - document.addEventListener("DOMContentLoaded", sorttable.init, false); -} - -/* for Internet Explorer */ -/*@cc_on @*/ -/*@if (@_win32) - document.write("