Index: MicroBenchmarks/libs/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/CMakeLists.txt +++ MicroBenchmarks/libs/CMakeLists.txt @@ -1,3 +1,3 @@ -add_subdirectory(benchmark-1.1.0) +add_subdirectory(benchmark-1.2.0) test_suite_add_build_dependencies(benchmark) test_suite_add_build_dependencies(output_test_helper) Index: MicroBenchmarks/libs/benchmark-1.1.0/.clang-format =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/.clang-format @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -... - Index: MicroBenchmarks/libs/benchmark-1.1.0/.gitignore =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/.gitignore @@ -1,46 +0,0 @@ -*.a -*.so -*.so.?* -*.dll -*.exe -*.dylib -*.cmake -!/cmake/*.cmake -*~ -*.pyc -__pycache__ - -# lcov -*.lcov -/lcov - -# cmake files. -/Testing -CMakeCache.txt -CMakeFiles/ -cmake_install.cmake - -# makefiles. -Makefile - -# in-source build. -bin/ -lib/ -/test/*_test - -# exuberant ctags. -tags - -# YouCompleteMe configuration. -.ycm_extra_conf.pyc - -# ninja generated files. -.ninja_deps -.ninja_log -build.ninja -install_manifest.txt -rules.ninja - -# out-of-source build top-level folders. -build/ -_build/ Index: MicroBenchmarks/libs/benchmark-1.1.0/.travis.yml =================================================================== --- MicroBenchmarks/libs/benchmark-1.1.0/.travis.yml +++ /dev/null @@ -1,102 +0,0 @@ -sudo: required -dist: trusty -language: cpp - -env: - global: - - /usr/local/bin:$PATH - -# NOTE: The COMPILER variable is unused. It simply makes the display on -# travis-ci.org more readable. -matrix: - include: - - compiler: gcc - addons: - apt: - packages: - - lcov - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage - - compiler: gcc - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug - - compiler: gcc - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release - - compiler: gcc - addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - g++-6 - env: - - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug - - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold" - - compiler: clang - env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug - - compiler: clang - env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release - # Clang w/ libc++ - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 - - EXTRA_FLAGS="-stdlib=libc++" - # Clang w/ libc++, ASAN, UBSAN - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address" - - EXTRA_FLAGS="-stdlib=libc++ -fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fno-sanitize-recover=all" - - UBSAN_OPTIONS=print_stacktrace=1 - # Clang w/ libc++ and MSAN - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins - - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" - # Clang w/ libc++ and MSAN - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo - - LIBCXX_BUILD=1 
LIBCXX_SANITIZER=Thread - - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" - -before_script: - - if [ -n "${LIBCXX_BUILD}" ]; then - source .travis-libcxx-setup.sh; - fi - - mkdir build && cd build - -install: - - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then - PATH=~/.local/bin:${PATH}; - pip install --user --upgrade pip; - pip install --user cpp-coveralls; - fi - -script: - - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" .. - - make - - make CTEST_OUTPUT_ON_FAILURE=1 test - -after_success: - - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then - coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .; - fi - -sudo: required Index: MicroBenchmarks/libs/benchmark-1.1.0/.ycm_extra_conf.py =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/.ycm_extra_conf.py @@ -1,115 +0,0 @@ -import os -import ycm_core - -# These are the compilation flags that will be used in case there's no -# compilation database set (by default, one is not set). -# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. -flags = [ -'-Wall', -'-Werror', -'-pendantic-errors', -'-std=c++0x', -'-fno-strict-aliasing', -'-O3', -'-DNDEBUG', -# ...and the same thing goes for the magic -x option which specifies the -# language that the files to be compiled are written in. This is mostly -# relevant for c++ headers. -# For a C project, you would set this to 'c' instead of 'c++'. -'-x', 'c++', -'-I', 'include', -'-isystem', '/usr/include', -'-isystem', '/usr/local/include', -] - - -# Set this to the absolute path to the folder (NOT the file!) containing the -# compile_commands.json file to use that instead of 'flags'. See here for -# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html -# -# Most projects will NOT need to set this to anything; you can just change the -# 'flags' list of compilation flags. Notice that YCM itself uses that approach. -compilation_database_folder = '' - -if os.path.exists( compilation_database_folder ): - database = ycm_core.CompilationDatabase( compilation_database_folder ) -else: - database = None - -SOURCE_EXTENSIONS = [ '.cc' ] - -def DirectoryOfThisScript(): - return os.path.dirname( os.path.abspath( __file__ ) ) - - -def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): - if not working_directory: - return list( flags ) - new_flags = [] - make_next_absolute = False - path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] - for flag in flags: - new_flag = flag - - if make_next_absolute: - make_next_absolute = False - if not flag.startswith( '/' ): - new_flag = os.path.join( working_directory, flag ) - - for path_flag in path_flags: - if flag == path_flag: - make_next_absolute = True - break - - if flag.startswith( path_flag ): - path = flag[ len( path_flag ): ] - new_flag = path_flag + os.path.join( working_directory, path ) - break - - if new_flag: - new_flags.append( new_flag ) - return new_flags - - -def IsHeaderFile( filename ): - extension = os.path.splitext( filename )[ 1 ] - return extension in [ '.h', '.hxx', '.hpp', '.hh' ] - - -def GetCompilationInfoForFile( filename ): - # The compilation_commands.json file generated by CMake does not have entries - # for header files. 
So we do our best by asking the db for flags for a - # corresponding source file, if any. If one exists, the flags for that file - # should be good enough. - if IsHeaderFile( filename ): - basename = os.path.splitext( filename )[ 0 ] - for extension in SOURCE_EXTENSIONS: - replacement_file = basename + extension - if os.path.exists( replacement_file ): - compilation_info = database.GetCompilationInfoForFile( - replacement_file ) - if compilation_info.compiler_flags_: - return compilation_info - return None - return database.GetCompilationInfoForFile( filename ) - - -def FlagsForFile( filename, **kwargs ): - if database: - # Bear in mind that compilation_info.compiler_flags_ does NOT return a - # python list, but a "list-like" StringVec object - compilation_info = GetCompilationInfoForFile( filename ) - if not compilation_info: - return None - - final_flags = MakeRelativePathsInFlagsAbsolute( - compilation_info.compiler_flags_, - compilation_info.compiler_working_dir_ ) - else: - relative_to = DirectoryOfThisScript() - final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) - - return { - 'flags': final_flags, - 'do_cache': True - } Index: MicroBenchmarks/libs/benchmark-1.1.0/CONTRIBUTING.md =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/CONTRIBUTING.md @@ -1,58 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - -Once your CLA is submitted (or if you already submitted one for -another Google project), make a commit adding yourself to the -[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part -of your first [pull request][]. - -[AUTHORS]: AUTHORS -[CONTRIBUTORS]: CONTRIBUTORS - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. 
- - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[pull request]: https://help.github.com/articles/creating-a-pull-request Index: MicroBenchmarks/libs/benchmark-1.1.0/LICENSE =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/LICENSE @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/AddCXXCompilerFlag.cmake =================================================================== --- MicroBenchmarks/libs/benchmark-1.1.0/cmake/AddCXXCompilerFlag.cmake +++ /dev/null @@ -1,37 +0,0 @@ -# - Adds a compiler flag if it is supported by the compiler -# -# This function checks that the supplied compiler flag is supported and then -# adds it to the corresponding compiler flags -# -# add_cxx_compiler_flag( []) -# -# - Example -# -# include(AddCXXCompilerFlag) -# add_cxx_compiler_flag(-Wall) -# add_cxx_compiler_flag(-no-strict-aliasing RELEASE) -# Requires CMake 2.6+ - -if(__add_cxx_compiler_flag) - return() -endif() -set(__add_cxx_compiler_flag INCLUDED) - -include(CheckCXXCompilerFlag) - -function(add_cxx_compiler_flag FLAG) - string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) - string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) - string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) - string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) - set(CMAKE_REQUIRED_FLAGS "${FLAG}") - check_cxx_compiler_flag("${FLAG}" ${SANITIZED_FLAG}) - if(${SANITIZED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) - string(TOUPPER "_${VARIANT}" VARIANT) - endif() - set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) - endif() -endfunction() - Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/GetGitVersion.cmake =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/cmake/GetGitVersion.cmake @@ -1,51 +0,0 @@ -# - Returns a version string from Git tags -# -# This function inspects the annotated git tags for the project and returns a string -# into a CMake variable -# -# get_git_version() -# -# - Example -# -# include(GetGitVersion) -# get_git_version(GIT_VERSION) -# -# Requires CMake 2.8.11+ -find_package(Git) - -if(__get_git_version) - return() -endif() -set(__get_git_version INCLUDED) - -function(get_git_version var) - if(GIT_EXECUTABLE) - execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 - RESULT_VARIABLE status - OUTPUT_VARIABLE GIT_VERSION - ERROR_QUIET) - if(${status}) - set(GIT_VERSION "v0.0.0") - else() - string(STRIP ${GIT_VERSION} GIT_VERSION) - string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) - endif() - 
-  # Work out if the repository is dirty
-  execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
-      OUTPUT_QUIET
-      ERROR_QUIET)
-  execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
-      OUTPUT_VARIABLE GIT_DIFF_INDEX
-      ERROR_QUIET)
-  string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
-  if (${GIT_DIRTY})
-    set(GIT_VERSION "${GIT_VERSION}-dirty")
-  endif()
-
-  message("-- git Version: ${GIT_VERSION}")
-  set(${var} ${GIT_VERSION} PARENT_SCOPE)
-endfunction()
Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/gnu_posix_regex.cpp
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/cmake/gnu_posix_regex.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <gnu/regex.h>
-#include <string>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-}
-
Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/posix_regex.cpp
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/cmake/posix_regex.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <regex.h>
-#include <string>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-  regfree(&re);
-  return ret;
-}
-
Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/std_regex.cpp
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/cmake/std_regex.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <regex>
-#include <string>
-int main() {
-  const std::string str = "test0159";
-  std::regex re;
-  re = std::regex("^[a-z]+[0-9]+$",
-                  std::regex_constants::extended | std::regex_constants::nosubs);
-  return std::regex_search(str, re) ? 0 : -1;
-}
-
Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/steady_clock.cpp
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/cmake/steady_clock.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <chrono>
-
-int main() {
-  typedef std::chrono::steady_clock Clock;
-  Clock::time_point tp = Clock::now();
-  ((void)tp);
-}
Index: MicroBenchmarks/libs/benchmark-1.1.0/cmake/thread_safety_attributes.cpp
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/cmake/thread_safety_attributes.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#define HAVE_THREAD_SAFETY_ATTRIBUTES
-#include "../src/mutex.h"
-
-int main() {}
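Note: the five cmake/*.cpp sources above are compile-only probes; the build tries to compile each one and, on success, defines a matching HAVE_* macro (HAVE_STEADY_CLOCK, HAVE_POSIX_REGEX, HAVE_GNU_POSIX_REGEX, HAVE_STD_REGEX, HAVE_THREAD_SAFETY_ATTRIBUTES). A minimal sketch of how such a define is consumed at compile time, in the spirit of src/timers.h further down; BenchClock and NowSeconds are illustrative names, not library API:

    #include <chrono>
    #include <cstdio>

    // Prefer the monotonic steady_clock when the probe compiled;
    // otherwise fall back to high_resolution_clock.
    #if defined(HAVE_STEADY_CLOCK)
    typedef std::chrono::steady_clock BenchClock;
    #else
    typedef std::chrono::high_resolution_clock BenchClock;
    #endif

    static double NowSeconds() {
      // Convert the clock's native tick count to floating-point seconds.
      return std::chrono::duration<double>(
                 BenchClock::now().time_since_epoch()).count();
    }

    int main() { std::printf("now = %f s\n", NowSeconds()); }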
Index: MicroBenchmarks/libs/benchmark-1.1.0/include/benchmark/macros.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/include/benchmark/macros.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#ifndef BENCHMARK_MACROS_H_
-#define BENCHMARK_MACROS_H_
-
-#if __cplusplus >= 201103L
-#define BENCHMARK_HAS_CXX11
-#endif
-
-#ifndef BENCHMARK_HAS_CXX11
-#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&);                         \
-  TypeName& operator=(const TypeName&)
-#else
-#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&) = delete;                \
-  TypeName& operator=(const TypeName&) = delete
-#endif
-
-#if defined(__GNUC__)
-#define BENCHMARK_UNUSED __attribute__((unused))
-#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline))
-#define BENCHMARK_NOEXCEPT noexcept
-#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
-#elif defined(_MSC_VER) && !defined(__clang__)
-#define BENCHMARK_UNUSED
-#define BENCHMARK_ALWAYS_INLINE __forceinline
-#if _MSC_VER >= 1900
-#define BENCHMARK_NOEXCEPT noexcept
-#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x)
-#else
-#define BENCHMARK_NOEXCEPT
-#define BENCHMARK_NOEXCEPT_OP(x)
-#endif
-#define __func__ __FUNCTION__
-#else
-#define BENCHMARK_UNUSED
-#define BENCHMARK_ALWAYS_INLINE
-#define BENCHMARK_NOEXCEPT
-#define BENCHMARK_NOEXCEPT_OP(x)
-#endif
-
-#if defined(__GNUC__)
-#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y)
-#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg)))
-#else
-#define BENCHMARK_BUILTIN_EXPECT(x, y) x
-#define BENCHMARK_DEPRECATED_MSG(msg)
-#endif
-
-#if defined(__GNUC__) && !defined(__clang__)
-#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-#endif
-
-#endif  // BENCHMARK_MACROS_H_
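Note: macros.h above is pure portability glue. A short sketch of how its two most used macros are applied; NonCopyable and dummy are hypothetical names, not part of the library:

    #include "benchmark/macros.h"

    class NonCopyable {
     public:
      NonCopyable() {}

     private:
      // Deleted copy operations under C++11; private, undefined
      // declarations under C++03.
      BENCHMARK_DISALLOW_COPY_AND_ASSIGN(NonCopyable);
    };

    // Expands to __attribute__((unused)) on GCC/Clang, silencing
    // -Wunused warnings; expands to nothing elsewhere.
    static int BENCHMARK_UNUSED dummy = 0;

    int main() {
      NonCopyable nc;
      ((void)nc);
    }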
-    size_t name_field_width;
-  };
-
-  struct Run {
-    Run()
-        : error_occurred(false),
-          iterations(1),
-          time_unit(kNanosecond),
-          real_accumulated_time(0),
-          cpu_accumulated_time(0),
-          bytes_per_second(0),
-          items_per_second(0),
-          max_heapbytes_used(0),
-          complexity(oNone),
-          complexity_lambda(),
-          complexity_n(0),
-          report_big_o(false),
-          report_rms(false) {}
-
-    std::string benchmark_name;
-    std::string report_label;  // Empty if not set by benchmark.
-    bool error_occurred;
-    std::string error_message;
-
-    int64_t iterations;
-    TimeUnit time_unit;
-    double real_accumulated_time;
-    double cpu_accumulated_time;
-
-    // Return a value representing the real time per iteration in the unit
-    // specified by 'time_unit'.
-    // NOTE: If 'iterations' is zero the returned value represents the
-    // accumulated time.
-    double GetAdjustedRealTime() const;
-
-    // Return a value representing the cpu time per iteration in the unit
-    // specified by 'time_unit'.
-    // NOTE: If 'iterations' is zero the returned value represents the
-    // accumulated time.
-    double GetAdjustedCPUTime() const;
-
-    // Zero if not set by benchmark.
-    double bytes_per_second;
-    double items_per_second;
-
-    // This is set to 0.0 if memory tracing is not enabled.
-    double max_heapbytes_used;
-
-    // Keep track of arguments to compute asymptotic complexity
-    BigO complexity;
-    BigOFunc* complexity_lambda;
-    int complexity_n;
-
-    // Inform print function whether the current run is a complexity report
-    bool report_big_o;
-    bool report_rms;
-  };
-
-  // Construct a BenchmarkReporter with the output stream set to 'std::cout'
-  // and the error stream set to 'std::cerr'
-  BenchmarkReporter();
-
-  // Called once for every suite of benchmarks run.
-  // The parameter "context" contains information that the
-  // reporter may wish to use when generating its report, for example the
-  // platform under which the benchmarks are running. The benchmark run is
-  // never started if this function returns false, allowing the reporter
-  // to skip runs based on the context information.
-  virtual bool ReportContext(const Context& context) = 0;
-
-  // Called once for each group of benchmark runs, gives information about
-  // cpu-time and heap memory usage during the benchmark run. If the group
-  // of runs contained more than two entries then 'report' contains additional
-  // elements representing the mean and standard deviation of those runs.
-  // Additionally if this group of runs was the last in a family of benchmarks
-  // 'reports' contains additional entries representing the asymptotic
-  // complexity and RMS of that benchmark family.
-  virtual void ReportRuns(const std::vector<Run>& report) = 0;
-
-  // Called once and only once after ever group of benchmarks is run and
-  // reported.
-  virtual void Finalize() {}
-
-  // REQUIRES: The object referenced by 'out' is valid for the lifetime
-  // of the reporter.
-  void SetOutputStream(std::ostream* out) {
-    assert(out);
-    output_stream_ = out;
-  }
-
-  // REQUIRES: The object referenced by 'err' is valid for the lifetime
-  // of the reporter.
-  void SetErrorStream(std::ostream* err) {
-    assert(err);
-    error_stream_ = err;
-  }
-
-  std::ostream& GetOutputStream() const { return *output_stream_; }
-
-  std::ostream& GetErrorStream() const { return *error_stream_; }
-
-  virtual ~BenchmarkReporter();
-
-  // Write a human readable string to 'out' representing the specified
-  // 'context'.
-  // REQUIRES: 'out' is non-null.
-  static void PrintBasicContext(std::ostream* out, Context const& context);
-
- private:
-  std::ostream* output_stream_;
-  std::ostream* error_stream_;
-};
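Note: the comment blocks above spell out the whole reporter contract: ReportContext may veto a run by returning false, and ReportRuns is invoked once per group of runs. A minimal conforming reporter, assuming only the interface shown here; CountingReporter is an illustrative name:

    #include "benchmark/reporter.h"

    class CountingReporter : public benchmark::BenchmarkReporter {
     public:
      CountingReporter() : runs_(0) {}

      virtual bool ReportContext(const Context& context) {
        PrintBasicContext(&GetErrorStream(), context);
        return true;  // Returning false would skip the runs entirely.
      }

      virtual void ReportRuns(const std::vector<Run>& reports) {
        runs_ += reports.size();
        for (size_t i = 0; i < reports.size(); ++i)
          GetOutputStream() << reports[i].benchmark_name << "\n";
      }

      virtual void Finalize() {
        GetOutputStream() << runs_ << " run(s) reported\n";
      }

     private:
      size_t runs_;
    };

It would be handed to the library the same way filter_test.cc below does it: benchmark::RunSpecifiedBenchmarks(&reporter).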
-
-// Simple reporter that outputs benchmark data to the console. This is the
-// default reporter used by RunSpecifiedBenchmarks().
-class ConsoleReporter : public BenchmarkReporter {
- public:
-  enum OutputOptions { OO_None, OO_Color };
-  explicit ConsoleReporter(OutputOptions color_output = OO_Color)
-      : name_field_width_(0), color_output_(color_output == OO_Color) {}
-
-  virtual bool ReportContext(const Context& context);
-  virtual void ReportRuns(const std::vector<Run>& reports);
-
- protected:
-  virtual void PrintRunData(const Run& report);
-  size_t name_field_width_;
-
- private:
-  bool color_output_;
-};
-
-class JSONReporter : public BenchmarkReporter {
- public:
-  JSONReporter() : first_report_(true) {}
-  virtual bool ReportContext(const Context& context);
-  virtual void ReportRuns(const std::vector<Run>& reports);
-  virtual void Finalize();
-
- private:
-  void PrintRunData(const Run& report);
-
-  bool first_report_;
-};
-
-class CSVReporter : public BenchmarkReporter {
- public:
-  virtual bool ReportContext(const Context& context);
-  virtual void ReportRuns(const std::vector<Run>& reports);
-
- private:
-  void PrintRunData(const Run& report);
-};
-
-inline const char* GetTimeUnitString(TimeUnit unit) {
-  switch (unit) {
-    case kMillisecond:
-      return "ms";
-    case kMicrosecond:
-      return "us";
-    case kNanosecond:
-    default:
-      return "ns";
-  }
-}
-
-inline double GetTimeUnitMultiplier(TimeUnit unit) {
-  switch (unit) {
-    case kMillisecond:
-      return 1e3;
-    case kMicrosecond:
-      return 1e6;
-    case kNanosecond:
-    default:
-      return 1e9;
-  }
-}
-
-}  // end namespace benchmark
-#endif  // BENCHMARK_REPORTER_H_
Index: MicroBenchmarks/libs/benchmark-1.1.0/mingw.py
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/mingw.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#! 
/usr/bin/env python -# encoding: utf-8 - -import argparse -import errno -import logging -import os -import platform -import re -import sys -import subprocess -import tempfile - -try: - import winreg -except ImportError: - import _winreg as winreg -try: - import urllib.request as request -except ImportError: - import urllib as request -try: - import urllib.parse as parse -except ImportError: - import urlparse as parse - -class EmptyLogger(object): - ''' - Provides an implementation that performs no logging - ''' - def debug(self, *k, **kw): - pass - def info(self, *k, **kw): - pass - def warn(self, *k, **kw): - pass - def error(self, *k, **kw): - pass - def critical(self, *k, **kw): - pass - def setLevel(self, *k, **kw): - pass - -urls = ( - 'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20' - 'targetting%20Win32/Personal%20Builds/mingw-builds/installer/' - 'repository.txt', - 'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/' - 'repository.txt' -) -''' -A list of mingw-build repositories -''' - -def repository(urls = urls, log = EmptyLogger()): - ''' - Downloads and parse mingw-build repository files and parses them - ''' - log.info('getting mingw-builds repository') - versions = {} - re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files') - re_sub = r'http://downloads.sourceforge.net/project/\1' - for url in urls: - log.debug(' - requesting: %s', url) - socket = request.urlopen(url) - repo = socket.read() - if not isinstance(repo, str): - repo = repo.decode(); - socket.close() - for entry in repo.split('\n')[:-1]: - value = entry.split('|') - version = tuple([int(n) for n in value[0].strip().split('.')]) - version = versions.setdefault(version, {}) - arch = value[1].strip() - if arch == 'x32': - arch = 'i686' - elif arch == 'x64': - arch = 'x86_64' - arch = version.setdefault(arch, {}) - threading = arch.setdefault(value[2].strip(), {}) - exceptions = threading.setdefault(value[3].strip(), {}) - revision = exceptions.setdefault(int(value[4].strip()[3:]), - re_sourceforge.sub(re_sub, value[5].strip())) - return versions - -def find_in_path(file, path=None): - ''' - Attempts to find an executable in the path - ''' - if platform.system() == 'Windows': - file += '.exe' - if path is None: - path = os.environ.get('PATH', '') - if type(path) is type(''): - path = path.split(os.pathsep) - return list(filter(os.path.exists, - map(lambda dir, file=file: os.path.join(dir, file), path))) - -def find_7zip(log = EmptyLogger()): - ''' - Attempts to find 7zip for unpacking the mingw-build archives - ''' - log.info('finding 7zip') - path = find_in_path('7z') - if not path: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip') - path, _ = winreg.QueryValueEx(key, 'Path') - path = [os.path.join(path, '7z.exe')] - log.debug('found \'%s\'', path[0]) - return path[0] - -find_7zip() - -def unpack(archive, location, log = EmptyLogger()): - ''' - Unpacks a mingw-builds archive - ''' - sevenzip = find_7zip(log) - log.info('unpacking %s', os.path.basename(archive)) - cmd = [sevenzip, 'x', archive, '-o' + location, '-y'] - log.debug(' - %r', cmd) - with open(os.devnull, 'w') as devnull: - subprocess.check_call(cmd, stdout = devnull) - -def download(url, location, log = EmptyLogger()): - ''' - Downloads and unpacks a mingw-builds archive - ''' - log.info('downloading MinGW') - log.debug(' - url: %s', url) - log.debug(' - location: %s', location) - - re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*') - - stream = 
request.urlopen(url) - try: - content = stream.getheader('Content-Disposition') or '' - except AttributeError: - content = stream.headers.getheader('Content-Disposition') or '' - matches = re_content.match(content) - if matches: - filename = matches.group(2) - else: - parsed = parse.urlparse(stream.geturl()) - filename = os.path.basename(parsed.path) - - try: - os.makedirs(location) - except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir(location): - pass - else: - raise - - archive = os.path.join(location, filename) - with open(archive, 'wb') as out: - while True: - buf = stream.read(1024) - if not buf: - break - out.write(buf) - unpack(archive, location, log = log) - os.remove(archive) - - possible = os.path.join(location, 'mingw64') - if not os.path.exists(possible): - possible = os.path.join(location, 'mingw32') - if not os.path.exists(possible): - raise ValueError('Failed to find unpacked MinGW: ' + possible) - return possible - -def root(location = None, arch = None, version = None, threading = None, - exceptions = None, revision = None, log = EmptyLogger()): - ''' - Returns the root folder of a specific version of the mingw-builds variant - of gcc. Will download the compiler if needed - ''' - - # Get the repository if we don't have all the information - if not (arch and version and threading and exceptions and revision): - versions = repository(log = log) - - # Determine some defaults - version = version or max(versions.keys()) - if not arch: - arch = platform.machine().lower() - if arch == 'x86': - arch = 'i686' - elif arch == 'amd64': - arch = 'x86_64' - if not threading: - keys = versions[version][arch].keys() - if 'posix' in keys: - threading = 'posix' - elif 'win32' in keys: - threading = 'win32' - else: - threading = keys[0] - if not exceptions: - keys = versions[version][arch][threading].keys() - if 'seh' in keys: - exceptions = 'seh' - elif 'sjlj' in keys: - exceptions = 'sjlj' - else: - exceptions = keys[0] - if revision == None: - revision = max(versions[version][arch][threading][exceptions].keys()) - if not location: - location = os.path.join(tempfile.gettempdir(), 'mingw-builds') - - # Get the download url - url = versions[version][arch][threading][exceptions][revision] - - # Tell the user whatzzup - log.info('finding MinGW %s', '.'.join(str(v) for v in version)) - log.debug(' - arch: %s', arch) - log.debug(' - threading: %s', threading) - log.debug(' - exceptions: %s', exceptions) - log.debug(' - revision: %s', revision) - log.debug(' - url: %s', url) - - # Store each specific revision differently - slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}' - slug = slug.format( - version = '.'.join(str(v) for v in version), - arch = arch, - threading = threading, - exceptions = exceptions, - revision = revision - ) - if arch == 'x86_64': - root_dir = os.path.join(location, slug, 'mingw64') - elif arch == 'i686': - root_dir = os.path.join(location, slug, 'mingw32') - else: - raise ValueError('Unknown MinGW arch: ' + arch) - - # Download if needed - if not os.path.exists(root_dir): - downloaded = download(url, os.path.join(location, slug), log = log) - if downloaded != root_dir: - raise ValueError('The location of mingw did not match\n%s\n%s' - % (downloaded, root_dir)) - - return root_dir - -def str2ver(string): - ''' - Converts a version string into a tuple - ''' - try: - version = tuple(int(v) for v in string.split('.')) - if len(version) is not 3: - raise ValueError() - except ValueError: - raise argparse.ArgumentTypeError( - 'please provide a 
three digit version string') - return version - -def main(): - ''' - Invoked when the script is run directly by the python interpreter - ''' - parser = argparse.ArgumentParser( - description = 'Downloads a specific version of MinGW', - formatter_class = argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument('--location', - help = 'the location to download the compiler to', - default = os.path.join(tempfile.gettempdir(), 'mingw-builds')) - parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'], - help = 'the target MinGW architecture string') - parser.add_argument('--version', type = str2ver, - help = 'the version of GCC to download') - parser.add_argument('--threading', choices = ['posix', 'win32'], - help = 'the threading type of the compiler') - parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'], - help = 'the method to throw exceptions') - parser.add_argument('--revision', type=int, - help = 'the revision of the MinGW release') - group = parser.add_mutually_exclusive_group() - group.add_argument('-v', '--verbose', action='store_true', - help='increase the script output verbosity') - group.add_argument('-q', '--quiet', action='store_true', - help='only print errors and warning') - args = parser.parse_args() - - # Create the logger - logger = logging.getLogger('mingw') - handler = logging.StreamHandler() - formatter = logging.Formatter('%(message)s') - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.INFO) - if args.quiet: - logger.setLevel(logging.WARN) - if args.verbose: - logger.setLevel(logging.DEBUG) - - # Get MinGW - root_dir = root(location = args.location, arch = args.arch, - version = args.version, threading = args.threading, - exceptions = args.exceptions, revision = args.revision, - log = logger) - - sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin')) - -if __name__ == '__main__': - try: - main() - except IOError as e: - sys.stderr.write('IO error: %s\n' % e) - sys.exit(1) - except OSError as e: - sys.stderr.write('OS error: %s\n' % e) - sys.exit(1) - except KeyboardInterrupt as e: - sys.stderr.write('Killed\n') - sys.exit(1) Index: MicroBenchmarks/libs/benchmark-1.1.0/src/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/benchmark-1.1.0/src/CMakeLists.txt +++ /dev/null @@ -1,44 +0,0 @@ -# Allow the source files to find headers in src/ -include_directories(${PROJECT_SOURCE_DIR}/src) - -if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) - list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) - list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) -endif() - -file(GLOB - SOURCE_FILES - *.cc - ${PROJECT_SOURCE_DIR}/include/benchmark/*.h - ${CMAKE_CURRENT_SOURCE_DIR}/*.h) - -add_library(benchmark ${SOURCE_FILES}) -set_target_properties(benchmark PROPERTIES - OUTPUT_NAME "benchmark" - VERSION ${GENERIC_LIB_VERSION} - SOVERSION ${GENERIC_LIB_SOVERSION} -) - -# Link threads. 
-target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
-
-# We need extra libraries on Windows
-if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
-  target_link_libraries(benchmark Shlwapi)
-endif()
-
-# Expose public API
-target_include_directories(benchmark PUBLIC ${PROJECT_SOURCE_DIR}/include)
-
-# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
-install(
-  TARGETS benchmark
-  ARCHIVE DESTINATION lib
-  LIBRARY DESTINATION lib
-  RUNTIME DESTINATION bin
-  COMPONENT library)
-
-install(
-  DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
-  DESTINATION include
-  FILES_MATCHING PATTERN "*.*h")
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/arraysize.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/arraysize.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_ARRAYSIZE_H_
-#define BENCHMARK_ARRAYSIZE_H_
-
-#include "internal_macros.h"
-
-namespace benchmark {
-namespace internal {
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example. If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
-
-// That gcc wants both of these prototypes seems mysterious. VC, for
-// its part, can't decide which to use (another mystery). Matching of
-// template overloads: the final frontier.
-#ifndef COMPILER_MSVC
-template <typename T, size_t N>
-char (&ArraySizeHelper(const T (&array)[N]))[N];
-#endif
-
-#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
-
-}  // end namespace internal
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_ARRAYSIZE_H_
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/colorprint.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/colorprint.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_COLORPRINT_H_
-#define BENCHMARK_COLORPRINT_H_
-
-#include <cstdarg>
-#include <iostream>
-#include <string>
-
-namespace benchmark {
-enum LogColor {
-  COLOR_DEFAULT,
-  COLOR_RED,
-  COLOR_GREEN,
-  COLOR_YELLOW,
-  COLOR_BLUE,
-  COLOR_MAGENTA,
-  COLOR_CYAN,
-  COLOR_WHITE
-};
-
-std::string FormatString(const char* msg, va_list args);
-std::string FormatString(const char* msg, ...);
-
-void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
-                 va_list args);
-void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
-
-// Returns true if stdout appears to be a terminal that supports colored
-// output, false otherwise.
-bool IsColorTerminal();
-
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_COLORPRINT_H_
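Note: colorprint.h above is an internal (src/) header pairing a printf-style formatter with a terminal capability check. A hedged usage sketch; the guard mirrors how the console reporter decides whether to emit color:

    #include <iostream>

    #include "colorprint.h"  // internal header, visible only inside src/

    int main() {
      if (benchmark::IsColorTerminal()) {
        // printf-style formatting, routed through the given stream.
        benchmark::ColorPrintf(std::cout, benchmark::COLOR_GREEN,
                               "ok: %d benchmarks\n", 3);
      } else {
        std::cout << "ok: 3 benchmarks\n";
      }
    }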
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/commandlineflags.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/commandlineflags.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
-#define BENCHMARK_COMMANDLINEFLAGS_H_
-
-#include <cstdint>
-#include <string>
-
-// Macro for referencing flags.
-#define FLAG(name) FLAGS_##name
-
-// Macros for declaring flags.
-#define DECLARE_bool(name) extern bool FLAG(name)
-#define DECLARE_int32(name) extern int32_t FLAG(name)
-#define DECLARE_int64(name) extern int64_t FLAG(name)
-#define DECLARE_double(name) extern double FLAG(name)
-#define DECLARE_string(name) extern std::string FLAG(name)
-
-// Macros for defining flags.
-#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
-#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
-#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
-#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
-#define DEFINE_string(name, default_val, doc) \
-  std::string FLAG(name) = (default_val)
-
-namespace benchmark {
-// Parses 'str' for a 32-bit signed integer. If successful, writes the result
-// to *value and returns true; otherwise leaves *value unchanged and returns
-// false.
-bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
-
-// Parses a bool/Int32/string from the environment variable
-// corresponding to the given Google Test flag.
-bool BoolFromEnv(const char* flag, bool default_val);
-int32_t Int32FromEnv(const char* flag, int32_t default_val);
-double DoubleFromEnv(const char* flag, double default_val);
-const char* StringFromEnv(const char* flag, const char* default_val);
-
-// Parses a string for a bool flag, in the form of either
-// "--flag=value" or "--flag".
-//
-// In the former case, the value is taken as true if it passes IsTruthyValue().
-//
-// In the latter case, the value is taken as true.
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value);
-
-// Parses a string for an Int32 flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
-
-// Parses a string for a Double flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseDoubleFlag(const char* str, const char* flag, double* value);
-
-// Parses a string for a string flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, std::string* value);
-
-// Returns true if the string matches the flag.
-bool IsFlag(const char* str, const char* flag);
-
-// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
-// some non-alphanumeric character. As a special case, also returns true if
-// value is the empty string.
-bool IsTruthyFlagValue(const std::string& value);
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_COMMANDLINEFLAGS_H_
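Note: the DECLARE_/DEFINE_ macros plus the Parse*Flag helpers above are the library's entire flag mechanism. A sketch of defining one flag and parsing it from argv; benchmark_dry_run is a made-up flag name used only for illustration:

    #include <iostream>

    #include "commandlineflags.h"

    // Expands to: bool FLAGS_benchmark_dry_run = false;
    DEFINE_bool(benchmark_dry_run, false, "parse flags but run nothing");

    int main(int argc, char** argv) {
      for (int i = 1; i < argc; ++i) {
        // Accepts both "--benchmark_dry_run" and "--benchmark_dry_run=<bool>".
        benchmark::ParseBoolFlag(argv[i], "benchmark_dry_run",
                                 &FLAG(benchmark_dry_run));
      }
      std::cout << "dry run: " << FLAG(benchmark_dry_run) << "\n";
    }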
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/mutex.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/mutex.h
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef BENCHMARK_MUTEX_H_
-#define BENCHMARK_MUTEX_H_
-
-#include <condition_variable>
-#include <mutex>
-
-#include "check.h"
-
-// Enable thread safety attributes only with clang.
-// The attributes can be safely erased when compiling with other compilers.
-#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
-#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
-#else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
-#endif
-
-#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
-
-#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
-
-#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
-
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
-
-#define ACQUIRED_BEFORE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
-
-#define ACQUIRED_AFTER(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
-
-#define REQUIRES(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
-
-#define REQUIRES_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
-
-#define ACQUIRE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
-
-#define ACQUIRE_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
-
-#define RELEASE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
-
-#define RELEASE_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
-
-#define TRY_ACQUIRE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
-
-#define TRY_ACQUIRE_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
-
-#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
-
-#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
-
-#define ASSERT_SHARED_CAPABILITY(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
-
-#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
-
-#define NO_THREAD_SAFETY_ANALYSIS \
-  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
-
-namespace benchmark {
-
-typedef std::condition_variable Condition;
-
-// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
-// we can annotate them with thread safety attributes and use the
-// -Wthread-safety warning with clang. The standard library types cannot be
-// used directly because they do not provided the required annotations.
-class CAPABILITY("mutex") Mutex {
- public:
-  Mutex() {}
-
-  void lock() ACQUIRE() { mut_.lock(); }
-  void unlock() RELEASE() { mut_.unlock(); }
-  std::mutex& native_handle() { return mut_; }
-
- private:
-  std::mutex mut_;
-};
-
-class SCOPED_CAPABILITY MutexLock {
-  typedef std::unique_lock<std::mutex> MutexLockImp;
-
- public:
-  MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {}
-  ~MutexLock() RELEASE() {}
-  MutexLockImp& native_handle() { return ml_; }
-
- private:
-  MutexLockImp ml_;
-};
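Note: under clang's -Wthread-safety the wrappers above make lock discipline statically checkable; the Barrier class that follows is the in-tree example. A smaller sketch using only the annotations defined above; Counter is an illustrative class, not part of the library:

    class Counter {
     public:
      void Increment() EXCLUDES(lock_) {
        MutexLock ml(lock_);  // RAII acquire/release via SCOPED_CAPABILITY.
        ++value_;
      }

     private:
      Mutex lock_;
      int value_ GUARDED_BY(lock_);  // clang flags any unlocked access.
    };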
-
-class Barrier {
- public:
-  Barrier(int num_threads) : running_threads_(num_threads) {}
-
-  // Called by each thread
-  bool wait() EXCLUDES(lock_) {
-    bool last_thread = false;
-    {
-      MutexLock ml(lock_);
-      last_thread = createBarrier(ml);
-    }
-    if (last_thread) phase_condition_.notify_all();
-    return last_thread;
-  }
-
-  void removeThread() EXCLUDES(lock_) {
-    MutexLock ml(lock_);
-    --running_threads_;
-    if (entered_ != 0) phase_condition_.notify_all();
-  }
-
- private:
-  Mutex lock_;
-  Condition phase_condition_;
-  int running_threads_;
-
-  // State for barrier management
-  int phase_number_ = 0;
-  int entered_ = 0;  // Number of threads that have entered this barrier
-
-  // Enter the barrier and wait until all other threads have also
-  // entered the barrier.  Returns iff this is the last thread to
-  // enter the barrier.
-  bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
-    CHECK_LT(entered_, running_threads_);
-    entered_++;
-    if (entered_ < running_threads_) {
-      // Wait for all threads to enter
-      int phase_number_cp = phase_number_;
-      auto cb = [this, phase_number_cp]() {
-        return this->phase_number_ > phase_number_cp ||
-               entered_ == running_threads_;  // A thread has aborted in error
-      };
-      phase_condition_.wait(ml.native_handle(), cb);
-      if (phase_number_ > phase_number_cp) return false;
-      // else (running_threads_ == entered_) and we are the last thread.
-    }
-    // Last thread has reached the barrier
-    phase_number_++;
-    entered_ = 0;
-    return true;
-  }
-};
-
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_MUTEX_H_
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/sleep.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/sleep.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef BENCHMARK_SLEEP_H_
-#define BENCHMARK_SLEEP_H_
-
-#include <cstdint>
-
-namespace benchmark {
-const int64_t kNumMillisPerSecond = 1000LL;
-const int64_t kNumMicrosPerMilli = 1000LL;
-const int64_t kNumMicrosPerSecond = kNumMillisPerSecond * 1000LL;
-const int64_t kNumNanosPerMicro = 1000LL;
-const int64_t kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
-
-void SleepForMilliseconds(int milliseconds);
-void SleepForSeconds(double seconds);
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_SLEEP_H_
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/string_util.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/string_util.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef BENCHMARK_STRING_UTIL_H_
-#define BENCHMARK_STRING_UTIL_H_
-
-#include <sstream>
-#include <string>
-#include <utility>
-#include "internal_macros.h"
-
-namespace benchmark {
-
-void AppendHumanReadable(int n, std::string* str);
-
-std::string HumanReadableNumber(double n);
-
-std::string StringPrintF(const char* format, ...);
-
-inline std::ostream& StringCatImp(std::ostream& out) BENCHMARK_NOEXCEPT {
-  return out;
-}
-
-template <class First, class... Rest>
-inline std::ostream& StringCatImp(std::ostream& out, First&& f,
-                                  Rest&&... rest) {
-  out << std::forward<First>(f);
-  return StringCatImp(out, std::forward<Rest>(rest)...);
-}
-
-template <class... Args>
-inline std::string StrCat(Args&&... args) {
-  std::ostringstream ss;
-  StringCatImp(ss, std::forward<Args>(args)...);
-  return ss.str();
-}
-
-void ReplaceAll(std::string* str, const std::string& from,
-                const std::string& to);
-
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_STRING_UTIL_H_
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/sysinfo.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/sysinfo.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef BENCHMARK_SYSINFO_H_
-#define BENCHMARK_SYSINFO_H_
-
-namespace benchmark {
-int NumCPUs();
-double CyclesPerSecond();
-bool CpuScalingEnabled();
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_SYSINFO_H_
Index: MicroBenchmarks/libs/benchmark-1.1.0/src/timers.h
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/src/timers.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#ifndef BENCHMARK_TIMERS_H
-#define BENCHMARK_TIMERS_H
-
-#include <chrono>
-#include <string>
-
-namespace benchmark {
-
-// Return the CPU usage of the current process
-double ProcessCPUUsage();
-
-// Return the CPU usage of the children of the current process
-double ChildrenCPUUsage();
-
-// Return the CPU usage of the current thread
-double ThreadCPUUsage();
-
-#if defined(HAVE_STEADY_CLOCK)
-template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady>
-struct ChooseSteadyClock {
-  typedef std::chrono::high_resolution_clock type;
-};
-
-template <>
-struct ChooseSteadyClock<false> {
-  typedef std::chrono::steady_clock type;
-};
-#endif
-
-struct ChooseClockType {
-#if defined(HAVE_STEADY_CLOCK)
-  typedef ChooseSteadyClock<>::type type;
-#else
-  typedef std::chrono::high_resolution_clock type;
-#endif
-};
-
-inline double ChronoClockNow() {
-  typedef ChooseClockType::type ClockType;
-  using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
-  return FpSeconds(ClockType::now().time_since_epoch()).count();
-}
-
-std::string LocalDateTimeString();
-
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_TIMERS_H
Index: MicroBenchmarks/libs/benchmark-1.1.0/test/filter_test.cc
===================================================================
--- MicroBenchmarks/libs/benchmark-1.1.0/test/filter_test.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-#include "benchmark/benchmark.h"
-
-#include <cassert>
-#include <cmath>
-#include <cstdint>
-#include <cstdlib>
-
-#include <iostream>
-#include <limits>
-#include <sstream>
-#include <string>
-
-namespace {
-
-class TestReporter : public benchmark::ConsoleReporter {
- public:
-  virtual bool ReportContext(const Context& context) {
-    return ConsoleReporter::ReportContext(context);
-  };
-
-  virtual void ReportRuns(const std::vector<Run>& report) {
-    ++count_;
-    ConsoleReporter::ReportRuns(report);
-  };
-
-  TestReporter() : count_(0) {}
-
-  virtual ~TestReporter() {}
-
-  size_t GetCount() const { return count_; }
-
- private:
-  mutable size_t count_;
-};
-
-}  // end namespace
-
-static void NoPrefix(benchmark::State& state) {
-  while (state.KeepRunning()) {
-  }
-}
-BENCHMARK(NoPrefix);
-
-static void BM_Foo(benchmark::State& state) {
-  while (state.KeepRunning()) {
-  }
-}
-BENCHMARK(BM_Foo);
-
-static void BM_Bar(benchmark::State& state) {
-  while (state.KeepRunning()) {
-  }
-}
-BENCHMARK(BM_Bar);
-
-static void BM_FooBar(benchmark::State& state) {
-  while (state.KeepRunning()) {
-  }
-}
-BENCHMARK(BM_FooBar);
-
-static void BM_FooBa(benchmark::State& state) {
-  while (state.KeepRunning()) {
-  }
-}
-BENCHMARK(BM_FooBa);
-
-int main(int argc, char **argv) {
-  bool list_only = false;
-  for (int i = 0; i < argc; ++i)
-    list_only |= std::string(argv[i]).find("--benchmark_list_tests") !=
-                 std::string::npos;
-
-  benchmark::Initialize(&argc, argv);
-
TestReporter test_reporter; - const size_t returned_count = - benchmark::RunSpecifiedBenchmarks(&test_reporter); - - if (argc == 2) { - // Make sure we ran all of the tests - std::stringstream ss(argv[1]); - size_t expected_return; - ss >> expected_return; - - if (returned_count != expected_return) { - std::cerr << "ERROR: Expected " << expected_return - << " tests to match the filter but returned_count = " - << returned_count << std::endl; - return -1; - } - - const size_t expected_reports = list_only ? 0 : expected_return; - const size_t reports_count = test_reporter.GetCount(); - if (reports_count != expected_reports) { - std::cerr << "ERROR: Expected " << expected_reports - << " tests to be run but reported_count = " << reports_count - << std::endl; - return -1; - } - } - - return 0; -} Index: MicroBenchmarks/libs/benchmark-1.1.0/test/fixture_test.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/test/fixture_test.cc @@ -1,49 +0,0 @@ - -#include "benchmark/benchmark.h" - -#include -#include - -class MyFixture : public ::benchmark::Fixture { - public: - void SetUp(const ::benchmark::State& state) { - if (state.thread_index == 0) { - assert(data.get() == nullptr); - data.reset(new int(42)); - } - } - - void TearDown(const ::benchmark::State& state) { - if (state.thread_index == 0) { - assert(data.get() != nullptr); - data.reset(); - } - } - - ~MyFixture() { assert(data == nullptr); } - - std::unique_ptr data; -}; - -BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { - assert(data.get() != nullptr); - assert(*data == 42); - while (st.KeepRunning()) { - } -} - -BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { - if (st.thread_index == 0) { - assert(data.get() != nullptr); - assert(*data == 42); - } - while (st.KeepRunning()) { - assert(data.get() != nullptr); - assert(*data == 42); - } - st.SetItemsProcessed(st.range(0)); -} -BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42); -BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu(); - -BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.1.0/test/map_test.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/test/map_test.cc @@ -1,56 +0,0 @@ -#include "benchmark/benchmark.h" - -#include -#include - -namespace { - -std::map ConstructRandomMap(int size) { - std::map m; - for (int i = 0; i < size; ++i) { - m.insert(std::make_pair(rand() % size, rand() % size)); - } - return m; -} - -} // namespace - -// Basic version. -static void BM_MapLookup(benchmark::State& state) { - const int size = state.range(0); - while (state.KeepRunning()) { - state.PauseTiming(); - std::map m = ConstructRandomMap(size); - state.ResumeTiming(); - for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(rand() % size)); - } - } - state.SetItemsProcessed(state.iterations() * size); -} -BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); - -// Using fixtures. 
-class MapFixture : public ::benchmark::Fixture { - public: - void SetUp(const ::benchmark::State& st) { - m = ConstructRandomMap(st.range(0)); - } - - void TearDown(const ::benchmark::State&) { m.clear(); } - - std::map m; -}; - -BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { - const int size = state.range(0); - while (state.KeepRunning()) { - for (int i = 0; i < size; ++i) { - benchmark::DoNotOptimize(m.find(rand() % size)); - } - } - state.SetItemsProcessed(state.iterations() * size); -} -BENCHMARK_REGISTER_F(MapFixture, Lookup)->Range(1 << 3, 1 << 12); - -BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.1.0/test/multiple_ranges_test.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/test/multiple_ranges_test.cc @@ -1,74 +0,0 @@ -#include "benchmark/benchmark.h" - -#include -#include - -class MultipleRangesFixture : public ::benchmark::Fixture { - public: - MultipleRangesFixture() - : expectedValues({{1, 3, 5}, - {1, 3, 8}, - {1, 3, 15}, - {2, 3, 5}, - {2, 3, 8}, - {2, 3, 15}, - {1, 4, 5}, - {1, 4, 8}, - {1, 4, 15}, - {2, 4, 5}, - {2, 4, 8}, - {2, 4, 15}, - {1, 7, 5}, - {1, 7, 8}, - {1, 7, 15}, - {2, 7, 5}, - {2, 7, 8}, - {2, 7, 15}, - {7, 6, 3}}) {} - - void SetUp(const ::benchmark::State& state) { - std::vector ranges = {state.range(0), state.range(1), state.range(2)}; - - assert(expectedValues.find(ranges) != expectedValues.end()); - - actualValues.insert(ranges); - } - - virtual ~MultipleRangesFixture() { - assert(actualValues.size() == expectedValues.size()); - } - - std::set> expectedValues; - std::set> actualValues; -}; - -BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { - while (state.KeepRunning()) { - int product = state.range(0) * state.range(1) * state.range(2); - for (int x = 0; x < product; x++) { - benchmark::DoNotOptimize(x); - } - } -} - -BENCHMARK_REGISTER_F(MultipleRangesFixture, Empty) - ->RangeMultiplier(2) - ->Ranges({{1, 2}, {3, 7}, {5, 15}}) - ->Args({7, 6, 3}); - -void BM_CheckDefaultArgument(benchmark::State& state) { - // Test that the 'range()' without an argument is the same as 'range(0)'. - assert(state.range() == state.range(0)); - assert(state.range() != state.range(1)); - while (state.KeepRunning()) { - } -} -BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}}); - -static void BM_MultipleRanges(benchmark::State& st) { - while (st.KeepRunning()) { - } -} -BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}}); - -BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.1.0/test/output_test.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.1.0/test/output_test.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef TEST_OUTPUT_TEST_H -#define TEST_OUTPUT_TEST_H - -#undef NDEBUG -#include -#include -#include -#include -#include - -#include "../src/re.h" -#include "benchmark/benchmark.h" - -#define CONCAT2(x, y) x##y -#define CONCAT(x, y) CONCAT2(x, y) - -#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__) - -#define SET_SUBSTITUTIONS(...) \ - int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__) - -enum MatchRules { - MR_Default, // Skip non-matching lines until a match is found. - MR_Next, // Match must occur on the next line. 
- MR_Not // No line between the current position and the next match matches - // the regex -}; - -struct TestCase { - TestCase(std::string re, int rule = MR_Default); - - std::string regex_str; - int match_rule; - std::string substituted_regex; - std::shared_ptr regex; -}; - -enum TestCaseID { - TC_ConsoleOut, - TC_ConsoleErr, - TC_JSONOut, - TC_JSONErr, - TC_CSVOut, - TC_CSVErr, - - TC_NumID // PRIVATE -}; - -// Add a list of test cases to be run against the output specified by -// 'ID' -int AddCases(TestCaseID ID, std::initializer_list il); - -// Add or set a list of substitutions to be performed on constructed regex's -// See 'output_test_helper.cc' for a list of default substitutions. -int SetSubstitutions( - std::initializer_list> il); - -// Run all output tests. -void RunOutputTests(int argc, char* argv[]); - -// ========================================================================= // -// --------------------------- Misc Utilities ------------------------------ // -// ========================================================================= // - -namespace { - -const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; - -} // end namespace - -#endif // TEST_OUTPUT_TEST_H Index: MicroBenchmarks/libs/benchmark-1.1.0/test/skip_with_error_test.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/test/skip_with_error_test.cc @@ -1,150 +0,0 @@ - -#undef NDEBUG -#include -#include - -#include "../src/check.h" // NOTE: check.h is for internal use only! -#include "benchmark/benchmark.h" - -namespace { - -class TestReporter : public benchmark::ConsoleReporter { - public: - virtual bool ReportContext(const Context& context) { - return ConsoleReporter::ReportContext(context); - }; - - virtual void ReportRuns(const std::vector& report) { - all_runs_.insert(all_runs_.end(), begin(report), end(report)); - ConsoleReporter::ReportRuns(report); - } - - TestReporter() {} - virtual ~TestReporter() {} - - mutable std::vector all_runs_; -}; - -struct TestCase { - std::string name; - bool error_occurred; - std::string error_message; - - typedef benchmark::BenchmarkReporter::Run Run; - - void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name) << "expected " << name << " got " - << run.benchmark_name; - CHECK(error_occurred == run.error_occurred); - CHECK(error_message == run.error_message); - if (error_occurred) { - // CHECK(run.iterations == 0); - } else { - CHECK(run.iterations != 0); - } - } -}; - -std::vector ExpectedResults; - -int AddCases(const char* base_name, std::initializer_list const& v) { - for (auto TC : v) { - TC.name = base_name + TC.name; - ExpectedResults.push_back(std::move(TC)); - } - return 0; -} - -#define CONCAT(x, y) CONCAT2(x, y) -#define CONCAT2(x, y) x##y -#define ADD_CASES(...) 
int CONCAT(dummy, __LINE__) = AddCases(__VA_ARGS__) - -} // end namespace - -void BM_error_before_running(benchmark::State& state) { - state.SkipWithError("error message"); - while (state.KeepRunning()) { - assert(false); - } -} -BENCHMARK(BM_error_before_running); -ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); - -void BM_error_during_running(benchmark::State& state) { - int first_iter = true; - while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { - assert(first_iter); - first_iter = false; - state.SkipWithError("error message"); - } else { - state.PauseTiming(); - state.ResumeTiming(); - } - } -} -BENCHMARK(BM_error_during_running)->Arg(1)->Arg(2)->ThreadRange(1, 8); -ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"}, - {"/1/threads:2", true, "error message"}, - {"/1/threads:4", true, "error message"}, - {"/1/threads:8", true, "error message"}, - {"/2/threads:1", false, ""}, - {"/2/threads:2", false, ""}, - {"/2/threads:4", false, ""}, - {"/2/threads:8", false, ""}}); - -void BM_error_after_running(benchmark::State& state) { - while (state.KeepRunning()) { - benchmark::DoNotOptimize(state.iterations()); - } - if (state.thread_index <= (state.threads / 2)) - state.SkipWithError("error message"); -} -BENCHMARK(BM_error_after_running)->ThreadRange(1, 8); -ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"}, - {"/threads:2", true, "error message"}, - {"/threads:4", true, "error message"}, - {"/threads:8", true, "error message"}}); - -void BM_error_while_paused(benchmark::State& state) { - bool first_iter = true; - while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { - assert(first_iter); - first_iter = false; - state.PauseTiming(); - state.SkipWithError("error message"); - } else { - state.PauseTiming(); - state.ResumeTiming(); - } - } -} -BENCHMARK(BM_error_while_paused)->Arg(1)->Arg(2)->ThreadRange(1, 8); -ADD_CASES("BM_error_while_paused", {{"/1/threads:1", true, "error message"}, - {"/1/threads:2", true, "error message"}, - {"/1/threads:4", true, "error message"}, - {"/1/threads:8", true, "error message"}, - {"/2/threads:1", false, ""}, - {"/2/threads:2", false, ""}, - {"/2/threads:4", false, ""}, - {"/2/threads:8", false, ""}}); - -int main(int argc, char* argv[]) { - benchmark::Initialize(&argc, argv); - - TestReporter test_reporter; - benchmark::RunSpecifiedBenchmarks(&test_reporter); - - typedef benchmark::BenchmarkReporter::Run Run; - auto EB = ExpectedResults.begin(); - - for (Run const& run : test_reporter.all_runs_) { - assert(EB != ExpectedResults.end()); - EB->CheckRun(run); - ++EB; - } - assert(EB == ExpectedResults.end()); - - return 0; -} Index: MicroBenchmarks/libs/benchmark-1.1.0/tools/compare_bench.py =================================================================== --- MicroBenchmarks/libs/benchmark-1.1.0/tools/compare_bench.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python -""" -compare_bench.py - Compare two benchmarks or their results and report the - difference. 
-""" -import sys -import gbench -from gbench import util, report - -def main(): - # Parse the command line flags - def usage(): - print('compare_bench.py [benchmark options]...') - exit(1) - if '--help' in sys.argv or len(sys.argv) < 3: - usage() - tests = sys.argv[1:3] - bench_opts = sys.argv[3:] - bench_opts = list(bench_opts) - # Run the benchmarks and report the results - json1 = gbench.util.run_or_load_benchmark(tests[0], bench_opts) - json2 = gbench.util.run_or_load_benchmark(tests[1], bench_opts) - output_lines = gbench.report.generate_difference_report(json1, json2) - print 'Comparing %s to %s' % (tests[0], tests[1]) - for ln in output_lines: - print(ln) - - -if __name__ == '__main__': - main() Index: MicroBenchmarks/libs/benchmark-1.1.0/tools/gbench/__init__.py =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.1.0/tools/gbench/__init__.py @@ -1,8 +0,0 @@ -"""Google Benchmark tooling""" - -__author__ = 'Eric Fiselier' -__email__ = 'eric@efcs.ca' -__versioninfo__ = (0, 5, 0) -__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' - -__all__ = [] Index: MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh +++ MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh @@ -10,12 +10,18 @@ git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi +# Setup libc++ options +if [ -z "$BUILD_32_BITS" ]; then + export BUILD_32_BITS=OFF && echo disabling 32 bit build +fi + # Build and install libc++ (Use unstable ABI for better sanitizer coverage) mkdir llvm-build && cd llvm-build cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \ -DLIBCXX_ABI_UNSTABLE=ON \ -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \ + -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \ ../llvm-source make cxx -j2 sudo make install-cxxabi install-cxx Index: MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml @@ -0,0 +1,157 @@ +sudo: required +dist: trusty +language: cpp + +env: + global: + - /usr/local/bin:$PATH + +matrix: + include: + - compiler: gcc + addons: + apt: + packages: + - lcov + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage + - compiler: gcc + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug + - compiler: gcc + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release + - compiler: gcc + addons: + apt: + packages: + - g++-multilib + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON + - compiler: gcc + addons: + apt: + packages: + - g++-multilib + env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON + - compiler: gcc + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-6 + env: + - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug + - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold" + - compiler: clang + env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug + - compiler: clang + env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release + # Clang w/ libc++ + - compiler: clang + addons: + apt: + packages: + clang-3.8 + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 
BUILD_TYPE=Debug + - LIBCXX_BUILD=1 + - EXTRA_FLAGS="-stdlib=libc++" + - compiler: clang + addons: + apt: + packages: + clang-3.8 + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release + - LIBCXX_BUILD=1 + - EXTRA_FLAGS="-stdlib=libc++" + # Clang w/ 32bit libc++ + - compiler: clang + addons: + apt: + packages: + - clang-3.8 + - g++-multilib + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-stdlib=libc++ -m32" + # Clang w/ 32bit libc++ + - compiler: clang + addons: + apt: + packages: + - clang-3.8 + - g++-multilib + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release + - LIBCXX_BUILD=1 + - BUILD_32_BITS=ON + - EXTRA_FLAGS="-stdlib=libc++ -m32" + # Clang w/ libc++, ASAN, UBSAN + - compiler: clang + addons: + apt: + packages: + clang-3.8 + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address" + - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all" + - UBSAN_OPTIONS=print_stacktrace=1 + # Clang w/ libc++ and MSAN + - compiler: clang + addons: + apt: + packages: + clang-3.8 + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug + - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins + - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" + # Clang w/ libc++ and MSAN + - compiler: clang + addons: + apt: + packages: + clang-3.8 + env: + - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo + - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread + - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" + + - os: osx + osx_image: xcode8.3 + compiler: clang + env: + - COMPILER=clang++ BUILD_TYPE=Debug + - os: osx + osx_image: xcode8.3 + compiler: clang + env: + - COMPILER=clang++ BUILD_TYPE=Release + +before_script: + - if [ -z "$BUILD_32_BITS" ]; then + export BUILD_32_BITS=OFF && echo disabling 32 bit build; + fi + - if [ -n "${LIBCXX_BUILD}" ]; then + source .travis-libcxx-setup.sh; + fi + - mkdir build && cd build + +install: + - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then + PATH=~/.local/bin:${PATH}; + pip install --user --upgrade pip; + pip install --user cpp-coveralls; + fi + +script: + - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} .. + - make + - ctest -C ${BUILD_TYPE} --output-on-failure + +after_success: + - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then + coveralls --include src --include include --gcov-options '\-lp' --root .. --build-root .; + fi Index: MicroBenchmarks/libs/benchmark-1.2.0/AUTHORS =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/AUTHORS +++ MicroBenchmarks/libs/benchmark-1.2.0/AUTHORS @@ -18,17 +18,22 @@ Evgeny Safronov Felix Homann Google Inc. 
+International Business Machines Corporation Ismael Jimenez Martinez +Jern-Kuan Leong +Joao Paulo Magalhaes JianXiong Zhou Jussi Knuuttila Kaito Udagawa Lei Xu Matt Clarkson +Maxim Vafin Nick Hutchinson Oleksandr Sochka Paul Redmond Radoslav Yovchev Shuo Chen +Yixuan Qiu Yusuke Suzuki Dirac Research Zbigniew Skowron Index: MicroBenchmarks/libs/benchmark-1.2.0/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/CMakeLists.txt +++ MicroBenchmarks/libs/benchmark-1.2.0/CMakeLists.txt @@ -1,4 +1,5 @@ -cmake_minimum_required (VERSION 2.8.11) +cmake_minimum_required (VERSION 2.8.12) + project (benchmark) foreach(p @@ -11,8 +12,11 @@ endforeach() option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON) +option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF) option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF) +option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library" OFF) + # Make sure we can import out CMake functions list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") @@ -33,12 +37,20 @@ include(AddCXXCompilerFlag) include(CXXFeatureCheck) +if (BENCHMARK_BUILD_32_BITS) + add_required_cxx_compiler_flag(-m32) +endif() + if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # Turn compiler warnings up to 11 string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4") add_definitions(-D_CRT_SECURE_NO_WARNINGS) + if (NOT BENCHMARK_ENABLE_EXCEPTIONS) + add_cxx_compiler_flag(-EHs-) + add_cxx_compiler_flag(-EHa-) + endif() # Link time optimisation if (BENCHMARK_ENABLE_LTO) set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL") @@ -80,12 +92,20 @@ add_cxx_compiler_flag(-Wshorten-64-to-32) add_cxx_compiler_flag(-Wfloat-equal) add_cxx_compiler_flag(-fstrict-aliasing) + if (NOT BENCHMARK_ENABLE_EXCEPTIONS) + add_cxx_compiler_flag(-fno-exceptions) + endif() if (NOT BENCHMARK_USE_LIBCXX) add_cxx_compiler_flag(-Wzero-as-null-pointer-constant) endif() if (HAVE_CXX_FLAG_FSTRICT_ALIASING) - add_cxx_compiler_flag(-Wstrict-aliasing) + if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing + add_cxx_compiler_flag(-Wstrict-aliasing) + endif() endif() + # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden + # (because of deprecated overload) + add_cxx_compiler_flag(-wd654) add_cxx_compiler_flag(-Wthread-safety) if (HAVE_CXX_FLAG_WTHREAD_SAFETY) cxx_feature_check(THREAD_SAFETY_ATTRIBUTES) @@ -162,7 +182,10 @@ if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) message(FATAL_ERROR "Failed to determine the source files for the regular expression backend") endif() - +if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX + AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX) + message(WARNING "Using std::regex with exceptions disabled is not fully supported") +endif() cxx_feature_check(STEADY_CLOCK) # Ensure we have pthreads find_package(Threads REQUIRED) Index: MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTORS =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTORS +++ MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTORS @@ -34,19 +34,25 @@ Evgeny Safronov Felix Homann Ismael Jimenez Martinez +Jern-Kuan 
Leong +Joao Paulo Magalhaes JianXiong Zhou Jussi Knuuttila Kaito Udagawa Kai Wolf Lei Xu Matt Clarkson +Maxim Vafin Nick Hutchinson Oleksandr Sochka Pascal Leroy Paul Redmond Pierre Phaneuf Radoslav Yovchev +Ray Glover Shuo Chen +Tom Madams +Yixuan Qiu Yusuke Suzuki Tobias Ulvgård Zbigniew Skowron Index: MicroBenchmarks/libs/benchmark-1.2.0/README.md =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/README.md +++ MicroBenchmarks/libs/benchmark-1.2.0/README.md @@ -11,6 +11,8 @@ [Known issues and common problems](#known-issues) +[Additional Tooling Documentation](docs/tools.md) + ## Example usage ### Basic usage Define a function that executes the code to be measured. @@ -363,7 +365,7 @@ } ``` -Note that `ClobberMemory()` is only available for GNU based compilers. +Note that `ClobberMemory()` is only available for GNU or MSVC based compilers. ### Set time unit manually If a benchmark runs a few milliseconds it may be hard to visually compare the @@ -430,6 +432,132 @@ /* BarTest is now registered */ ``` + +## User-defined counters + +You can add your own counters with user-defined names. The example below +will add columns "Foo", "Bar" and "Baz" in its output: + +```c++ +static void UserCountersExample1(benchmark::State& state) { + double numFoos = 0, numBars = 0, numBazs = 0; + while (state.KeepRunning()) { + // ... count Foo,Bar,Baz events + } + state.counters["Foo"] = numFoos; + state.counters["Bar"] = numBars; + state.counters["Baz"] = numBazs; +} +``` + +The `state.counters` object is a `std::map` with `std::string` keys +and `Counter` values. The latter is a `double`-like class, via an implicit +conversion to `double&`. Thus you can use all of the standard arithmetic +assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter. + +In multithreaded benchmarks, each counter is set on the calling thread only. +When the benchmark finishes, the counters from each thread will be summed; +the resulting sum is the value which will be shown for the benchmark. + +The `Counter` constructor accepts two parameters: the value as a `double` +and a bit flag which allows you to show counters as rates and/or as +per-thread averages: + +```c++ + // sets a simple counter + state.counters["Foo"] = numFoos; + + // Set the counter as a rate. It will be presented divided + // by the duration of the benchmark. + state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate); + + // Set the counter as a thread-average quantity. It will + // be presented divided by the number of threads. + state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads); + + // There's also a combined flag: + state.counters["FooAvgRate"] = Counter(numFoos,benchmark::Counter::kAvgThreadsRate); +``` + +When you're compiling in C++11 mode or later you can use `insert()` with +`std::initializer_list`: + +```c++ + // With C++11, this can be done: + state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); + // ... instead of: + state.counters["Foo"] = numFoos; + state.counters["Bar"] = numBars; + state.counters["Baz"] = numBazs; +``` + +### Counter reporting + +When using the console reporter, by default, user counters are printed at +the end after the table, the same way as ``bytes_processed`` and +``items_processed``. This is best for cases in which there are few counters, +or where there are only a couple of lines per benchmark. Here's an example of
Here's an example of +the default output: + +``` +------------------------------------------------------------------------------ +Benchmark Time CPU Iterations UserCounters... +------------------------------------------------------------------------------ +BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8 +BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m +BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2 +BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4 +BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8 +BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16 +BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32 +BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4 +BM_Factorial 26 ns 26 ns 26608979 40320 +BM_Factorial/real_time 26 ns 26 ns 26587936 40320 +BM_CalculatePiRange/1 16 ns 16 ns 45704255 0 +BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374 +BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746 +BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355 +``` + +If this doesn't suit you, you can print each counter as a table column by +passing the flag `--benchmark_counters_tabular=true` to the benchmark +application. This is best for cases in which there are a lot of counters, or +a lot of lines per individual benchmark. Note that this will trigger a +reprinting of the table header any time the counter set changes between +individual benchmarks. Here's an example of corresponding output when +`--benchmark_counters_tabular=true` is passed: + +``` +--------------------------------------------------------------------------------------- +Benchmark Time CPU Iterations Bar Bat Baz Foo +--------------------------------------------------------------------------------------- +BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8 +BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1 +BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2 +BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4 +BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8 +BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16 +BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32 +BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4 +-------------------------------------------------------------- +Benchmark Time CPU Iterations +-------------------------------------------------------------- +BM_Factorial 26 ns 26 ns 26392245 40320 +BM_Factorial/real_time 26 ns 26 ns 26494107 40320 +BM_CalculatePiRange/1 15 ns 15 ns 45571597 0 +BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374 +BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746 +BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355 +BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184 +BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162 +BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416 +BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159 +BM_CalculatePi/threads:8 2255 ns 9943 ns 70936 +``` +Note above the additional header printed when the benchmark changes from +``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does +not have the same counter set as ``BM_UserCounter``. + ## Exiting Benchmarks in Error When errors caused by external influences, such as file I/O and network @@ -501,7 +629,7 @@ information about the CPU and the date. The `benchmarks` attribute contains a list of ever benchmark run. 
Example json output looks like: -``` json +```json { "context": { "date": "2015/03/17-18:40:25", @@ -582,6 +710,7 @@ * GCC 4.8 * Clang 3.4 * Visual Studio 2013 +* Intel 2015 Update 1 Anything older *may* work. Index: MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml +++ MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml @@ -1,16 +1,18 @@ version: '{build}' +image: Visual Studio 2017 + configuration: - Debug - Release environment: matrix: - - compiler: msvc-12-seh - generator: "Visual Studio 12 2013" + - compiler: msvc-15-seh + generator: "Visual Studio 15 2017" - - compiler: msvc-12-seh - generator: "Visual Studio 12 2013 Win64" + - compiler: msvc-15-seh + generator: "Visual Studio 15 2017 Win64" - compiler: msvc-14-seh generator: "Visual Studio 14 2015" @@ -18,9 +20,16 @@ - compiler: msvc-14-seh generator: "Visual Studio 14 2015 Win64" + - compiler: msvc-12-seh + generator: "Visual Studio 12 2013" + + - compiler: msvc-12-seh + generator: "Visual Studio 12 2013 Win64" + - compiler: gcc-5.3.0-posix generator: "MinGW Makefiles" cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 matrix: fast_finish: true @@ -30,12 +39,6 @@ - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") -# TODO Remove this. This is a hack to work around bogus warning messages -# See http://goo.gl/euguBI for more information. -before_build: - - del "C:\Program Files (x86)\MSBuild\14.0\Microsoft.Common.targets\ImportAfter\Xamarin.Common.targets" - - del "C:\Program Files (x86)\MSBuild\12.0\Microsoft.Common.targets\ImportAfter\Xamarin.Common.targets" - build_script: - md _build -Force - cd _build @@ -51,4 +54,3 @@ name: logs - path: '_build/Testing/**/*.xml' name: test_results - Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/AddCXXCompilerFlag.cmake =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/AddCXXCompilerFlag.cmake @@ -0,0 +1,64 @@ +# - Adds a compiler flag if it is supported by the compiler +# +# This function checks that the supplied compiler flag is supported and then +# adds it to the corresponding compiler flags +# +# add_cxx_compiler_flag( []) +# +# - Example +# +# include(AddCXXCompilerFlag) +# add_cxx_compiler_flag(-Wall) +# add_cxx_compiler_flag(-no-strict-aliasing RELEASE) +# Requires CMake 2.6+ + +if(__add_cxx_compiler_flag) + return() +endif() +set(__add_cxx_compiler_flag INCLUDED) + +include(CheckCXXCompilerFlag) + +function(mangle_compiler_flag FLAG OUTPUT) + string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG) + string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG}) + string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) + string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG}) + set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE) +endfunction(mangle_compiler_flag) + +function(add_cxx_compiler_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") + if(${MANGLED_FLAG}) + set(VARIANT ${ARGV1}) + if(ARGV1) + string(TOUPPER "_${VARIANT}" VARIANT) + endif() + set(CMAKE_CXX_FLAGS${VARIANT} 
"${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + endif() +endfunction() + +function(add_required_cxx_compiler_flag FLAG) + mangle_compiler_flag("${FLAG}" MANGLED_FLAG) + set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}") + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}") + check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) + set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") + if(${MANGLED_FLAG}) + set(VARIANT ${ARGV1}) + if(ARGV1) + string(TOUPPER "_${VARIANT}" VARIANT) + endif() + set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) + set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE) + else() + message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler") + endif() +endfunction() Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/CXXFeatureCheck.cmake =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/cmake/CXXFeatureCheck.cmake +++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/CXXFeatureCheck.cmake @@ -10,7 +10,7 @@ # # include(CXXFeatureCheck) # cxx_feature_check(STD_REGEX) -# Requires CMake 2.6+ +# Requires CMake 2.8.12+ if(__cxx_feature_check) return() @@ -22,6 +22,8 @@ string(TOUPPER ${FILE} VAR) string(TOUPPER "HAVE_${VAR}" FEATURE) if (DEFINED HAVE_${VAR}) + set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE) + add_definitions(-DHAVE_${VAR}) return() endif() message("-- Performing Test ${FEATURE}") Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in @@ -0,0 +1 @@ +include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") Index: MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md @@ -0,0 +1,59 @@ +# Benchmark Tools + +## compare_bench.py + +The `compare_bench.py` utility which can be used to compare the result of benchmarks. +The program is invoked like: + +``` bash +$ compare_bench.py [benchmark options]... +``` + +Where `` and `` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file. + +The sample output using the JSON test files under `Inputs/` gives: + +``` bash +$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json +Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json +Benchmark Time CPU +---------------------------------------------- +BM_SameTimes +0.00 +0.00 +BM_2xFaster -0.50 -0.50 +BM_2xSlower +1.00 +1.00 +BM_10PercentFaster -0.10 -0.10 +BM_10PercentSlower +0.10 +0.10 +``` + +When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. 
The sample output using `benchmark/basic_test` for both arguments looks like: + +``` +./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.* +RUNNING: test/basic_test --benchmark_filter=BM_empty.* +Run on (4 X 4228.32 MHz CPU s) +2016-08-02 19:21:33 +Benchmark Time CPU Iterations +-------------------------------------------------------------------- +BM_empty 9 ns 9 ns 79545455 +BM_empty/threads:4 4 ns 9 ns 75268816 +BM_empty_stop_start 8 ns 8 ns 83333333 +BM_empty_stop_start/threads:4 3 ns 8 ns 83333332 +RUNNING: test/basic_test --benchmark_filter=BM_empty.* +Run on (4 X 4228.32 MHz CPU s) +2016-08-02 19:21:35 +Benchmark Time CPU Iterations +-------------------------------------------------------------------- +BM_empty 9 ns 9 ns 76086957 +BM_empty/threads:4 4 ns 9 ns 76086956 +BM_empty_stop_start 8 ns 8 ns 87500000 +BM_empty_stop_start/threads:4 3 ns 8 ns 88607596 +Comparing test/basic_test to test/basic_test +Benchmark Time CPU +--------------------------------------------------------- +BM_empty +0.00 +0.00 +BM_empty/threads:4 +0.00 +0.00 +BM_empty_stop_start +0.00 +0.00 +BM_empty_stop_start/threads:4 +0.00 +0.00 +``` + +Obviously this example doesn't give any useful output, but it's intended to show the output format when 'compare_bench.py' needs to run benchmarks. Index: MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark.h +++ MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark.h @@ -1,3 +1,17 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Support for registering benchmarks for functions. 
/* Example usage: @@ -146,28 +160,93 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); */ -#ifndef BENCHMARK_BENCHMARK_API_H_ -#define BENCHMARK_BENCHMARK_API_H_ +#ifndef BENCHMARK_BENCHMARK_H_ +#define BENCHMARK_BENCHMARK_H_ + + +#if __cplusplus >= 201103L +#define BENCHMARK_HAS_CXX11 +#endif -#include -#include #include +#include +#include +#include #include #include - -#include "macros.h" +#include +#include #if defined(BENCHMARK_HAS_CXX11) #include +#include #include #endif +#if defined(_MSC_VER) +#include // for _ReadWriteBarrier +#endif + +#ifndef BENCHMARK_HAS_CXX11 +#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&); \ + TypeName& operator=(const TypeName&) +#else +#define BENCHMARK_DISALLOW_COPY_AND_ASSIGN(TypeName) \ + TypeName(const TypeName&) = delete; \ + TypeName& operator=(const TypeName&) = delete +#endif + +#if defined(__GNUC__) +#define BENCHMARK_UNUSED __attribute__((unused)) +#define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline)) +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#elif defined(_MSC_VER) && !defined(__clang__) +#define BENCHMARK_UNUSED +#define BENCHMARK_ALWAYS_INLINE __forceinline +#if _MSC_VER >= 1900 +#define BENCHMARK_NOEXCEPT noexcept +#define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) +#else +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif +#define __func__ __FUNCTION__ +#else +#define BENCHMARK_UNUSED +#define BENCHMARK_ALWAYS_INLINE +#define BENCHMARK_NOEXCEPT +#define BENCHMARK_NOEXCEPT_OP(x) +#endif + +#define BENCHMARK_INTERNAL_TOSTRING2(x) #x +#define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) + +#if defined(__GNUC__) +#define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) +#define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#else +#define BENCHMARK_BUILTIN_EXPECT(x, y) x +#define BENCHMARK_DEPRECATED_MSG(msg) +#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg)) +#endif + +#if defined(__GNUC__) && !defined(__clang__) +#define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +#endif + + namespace benchmark { class BenchmarkReporter; void Initialize(int* argc, char** argv); +// Report to stdout all arguments in 'argv' as unrecognized except the first. +// Returns true if there is at least one unrecognized argument (i.e. 'argc' > 1). +bool ReportUnrecognizedArguments(int argc, char** argv); + // Generate a list of benchmarks matching the specified --benchmark_filter flag // and if --benchmark_list_tests is specified return after printing the name // of each matching benchmark. Otherwise run each matching benchmark and @@ -197,19 +276,6 @@ class BenchmarkImp; class BenchmarkFamilies; -template -struct Voider { - typedef void type; -}; - -template -struct EnableIfString {}; - -template -struct EnableIfString::type> { - typedef int type; -}; - void UseCharPointer(char const volatile*); // Take ownership of the pointer and register the benchmark. Return the @@ -220,30 +286,84 @@ int InitializeStreams(); BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); -} // end namespace internal +} // namespace internal + + +#if !defined(__GNUC__) || defined(__pnacl__) || defined(EMSCRIPTN) +# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#endif // The DoNotOptimize(...) function can be used to prevent a value or // expression from being optimized away by the compiler. This function is // intended to add little to no overhead.
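// Illustrative usage sketch (editorial example in the spirit of the project
// README; `BM_VectorPushBack` is hypothetical and not part of this header):
//
//   static void BM_VectorPushBack(benchmark::State& state) {
//     while (state.KeepRunning()) {
//       std::vector<int> v;
//       v.reserve(1);
//       benchmark::DoNotOptimize(v.data());  // make the allocation observable
//       v.push_back(42);
//       benchmark::ClobberMemory();          // make the preceding store visible
//     }
//   }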
// See: https://youtu.be/nXaxk27zwlk?t=2441 -#if defined(__GNUC__) +#ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY template inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + // Clang doesn't like the 'X' constraint on `value` and certain GCC versions + // don't like the 'g' constraint. Attempt to placate them both. +#if defined(__clang__) asm volatile("" : : "g"(value) : "memory"); +#else + asm volatile("" : : "i,r,m"(value) : "memory"); +#endif } // Force the compiler to flush pending writes to global memory. Acts as an // effective read/write barrier inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { asm volatile("" : : : "memory"); } +#elif defined(_MSC_VER) +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + internal::UseCharPointer(&reinterpret_cast(value)); + _ReadWriteBarrier(); +} + +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { + _ReadWriteBarrier(); +} #else template inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { internal::UseCharPointer(&reinterpret_cast(value)); } -// FIXME Add ClobberMemory() for non-gnu compilers +// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers #endif + + +// This class is used for user-defined counters. +class Counter { +public: + + enum Flags { + kDefaults = 0, + // Mark the counter as a rate. It will be presented divided + // by the duration of the benchmark. + kIsRate = 1, + // Mark the counter as a thread-average quantity. It will be + // presented divided by the number of threads. + kAvgThreads = 2, + // Mark the counter as a thread-average rate. See above. + kAvgThreadsRate = kIsRate|kAvgThreads + }; + + double value; + Flags flags; + + BENCHMARK_ALWAYS_INLINE + Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {} + + BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; } + BENCHMARK_ALWAYS_INLINE operator double & () { return value; } + +}; + +// This is the container for the user-defined counters. +typedef std::map UserCounters; + + // TimeUnit is passed to a benchmark in order to specify the order of magnitude // for the measured time. enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond }; @@ -262,16 +382,17 @@ class ThreadTimer; class ThreadManager; +enum ReportMode #if defined(BENCHMARK_HAS_CXX11) -enum ReportMode : unsigned { + : unsigned #else -enum ReportMode { #endif + { RM_Unspecified, // The mode has not been manually specified RM_Default, // The mode is user-specified as default. RM_ReportAggregatesOnly }; -} +} // namespace internal // State is passed to a running Benchmark and contains state for the // benchmark to use. @@ -393,13 +514,7 @@ // REQUIRES: a benchmark has exited its KeepRunning loop. void SetLabel(const char* label); - // Allow the use of std::string without actually including . - // This function does not participate in overload resolution unless StringType - // has the nested typename `basic_string`. This typename should be provided - // as an injected class name in the case of std::string. - template - void SetLabel(StringType const& str, - typename internal::EnableIfString::type = 1) { + void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) { this->SetLabel(str.c_str()); } @@ -434,13 +549,15 @@ bool error_occurred_; public: + // Container for user-defined counters. + UserCounters counters; // Index of the executing thread. Values from [0, threads). const int thread_index; // Number of threads concurrently executing the benchmark. 
const int threads; const size_t max_iterations; - // TODO make me private + // TODO(EricWF) make me private State(size_t max_iters, const std::vector& ranges, int thread_i, int n_threads, internal::ThreadTimer* timer, internal::ThreadManager* manager); @@ -536,9 +653,17 @@ // Set the minimum amount of time to use when running this benchmark. This // option overrides the `benchmark_min_time` flag. - // REQUIRES: `t > 0` + // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark. Benchmark* MinTime(double t); + // Specify the number of iterations that should be run by this benchmark. + // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark. + // + // NOTE: This function should only be used when *exact* iteration control is + // needed and never to control or limit how long a benchmark runs, where + // `--benchmark_min_time=N` or `MinTime(...)` should be used instead. + Benchmark* Iterations(size_t n); + // Specify the number of times to repeat this benchmark. This option overrides // the `benchmark_repetitions` flag. // REQUIRES: `n > 0` @@ -547,7 +672,7 @@ // Specify if each repetition of the benchmark should be reported separately // or if only the final statistics should be reported. If the benchmark // is not repeated then the single result is always reported. - Benchmark* ReportAggregatesOnly(bool v = true); + Benchmark* ReportAggregatesOnly(bool value = true); // If a particular benchmark is I/O bound, runs multiple threads internally or // if for some reason CPU timings are not representative, call this method. If @@ -627,6 +752,7 @@ TimeUnit time_unit_; int range_multiplier_; double min_time_; + size_t iterations_; int repetitions_; bool use_real_time_; bool use_manual_time_; @@ -651,6 +777,10 @@ internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn); #endif +// Remove all registered benchmarks. All pointers to previously registered +// benchmarks are invalidated. +void ClearRegisteredBenchmarks(); + namespace internal { // The class used to hold all Benchmarks created from static function. // (ie those created using the BENCHMARK(...) macros. @@ -686,7 +816,7 @@ }; #endif -} // end namespace internal +} // namespace internal inline internal::Benchmark* RegisterBenchmark(const char* name, internal::Function* fn) { @@ -738,7 +868,7 @@ virtual void BenchmarkCase(State&) = 0; }; -} // end namespace benchmark +} // namespace benchmark // ------------------------------------------------------ // Macro to register benchmarks @@ -858,7 +988,223 @@ #define BENCHMARK_MAIN() \ int main(int argc, char** argv) { \ ::benchmark::Initialize(&argc, argv); \ + if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ ::benchmark::RunSpecifiedBenchmarks(); \ } -#endif // BENCHMARK_BENCHMARK_API_H_ + +// ------------------------------------------------------ +// Benchmark Reporters + +namespace benchmark { + +// Interface for custom benchmark result printers. +// By default, benchmark reports are printed to stdout. However an application +// can control the destination of the reports by calling +// RunSpecifiedBenchmarks and passing it a custom reporter object. +// The reporter object must implement the following interface. +class BenchmarkReporter { + public: + struct Context { + int num_cpus; + double mhz_per_cpu; + bool cpu_scaling_enabled; + + // The number of chars in the longest benchmark name.
+ size_t name_field_width; + }; + + struct Run { + Run() + : error_occurred(false), + iterations(1), + time_unit(kNanosecond), + real_accumulated_time(0), + cpu_accumulated_time(0), + bytes_per_second(0), + items_per_second(0), + max_heapbytes_used(0), + complexity(oNone), + complexity_lambda(), + complexity_n(0), + report_big_o(false), + report_rms(false), + counters() {} + + std::string benchmark_name; + std::string report_label; // Empty if not set by benchmark. + bool error_occurred; + std::string error_message; + + int64_t iterations; + TimeUnit time_unit; + double real_accumulated_time; + double cpu_accumulated_time; + + // Return a value representing the real time per iteration in the unit + // specified by 'time_unit'. + // NOTE: If 'iterations' is zero the returned value represents the + // accumulated time. + double GetAdjustedRealTime() const; + + // Return a value representing the cpu time per iteration in the unit + // specified by 'time_unit'. + // NOTE: If 'iterations' is zero the returned value represents the + // accumulated time. + double GetAdjustedCPUTime() const; + + // Zero if not set by benchmark. + double bytes_per_second; + double items_per_second; + + // This is set to 0.0 if memory tracing is not enabled. + double max_heapbytes_used; + + // Keep track of arguments to compute asymptotic complexity + BigO complexity; + BigOFunc* complexity_lambda; + int complexity_n; + + // Inform print function whether the current run is a complexity report + bool report_big_o; + bool report_rms; + + UserCounters counters; + }; + + // Construct a BenchmarkReporter with the output stream set to 'std::cout' + // and the error stream set to 'std::cerr' + BenchmarkReporter(); + + // Called once for every suite of benchmarks run. + // The parameter "context" contains information that the + // reporter may wish to use when generating its report, for example the + // platform under which the benchmarks are running. The benchmark run is + // never started if this function returns false, allowing the reporter + // to skip runs based on the context information. + virtual bool ReportContext(const Context& context) = 0; + + // Called once for each group of benchmark runs, gives information about + // cpu-time and heap memory usage during the benchmark run. If the group + // of runs contained more than two entries then 'report' contains additional + // elements representing the mean and standard deviation of those runs. + // Additionally if this group of runs was the last in a family of benchmarks + // 'reports' contains additional entries representing the asymptotic + // complexity and RMS of that benchmark family. + virtual void ReportRuns(const std::vector& report) = 0; + + // Called once and only once after every group of benchmarks is run and + // reported. + virtual void Finalize() {} + + // REQUIRES: The object referenced by 'out' is valid for the lifetime + // of the reporter. + void SetOutputStream(std::ostream* out) { + assert(out); + output_stream_ = out; + } + + // REQUIRES: The object referenced by 'err' is valid for the lifetime + // of the reporter. + void SetErrorStream(std::ostream* err) { + assert(err); + error_stream_ = err; + } + + std::ostream& GetOutputStream() const { return *output_stream_; } + + std::ostream& GetErrorStream() const { return *error_stream_; } + + virtual ~BenchmarkReporter(); + + // Write a human readable string to 'out' representing the specified + // 'context'. + // REQUIRES: 'out' is non-null.
+ static void PrintBasicContext(std::ostream* out, Context const& context); + + private: + std::ostream* output_stream_; + std::ostream* error_stream_; +}; + +// Simple reporter that outputs benchmark data to the console. This is the +// default reporter used by RunSpecifiedBenchmarks(). +class ConsoleReporter : public BenchmarkReporter { +public: + enum OutputOptions { + OO_None = 0, + OO_Color = 1, + OO_Tabular = 2, + OO_ColorTabular = OO_Color|OO_Tabular, + OO_Defaults = OO_ColorTabular + }; + explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) + : output_options_(opts_), name_field_width_(0), + prev_counters_(), printed_header_(false) {} + + virtual bool ReportContext(const Context& context); + virtual void ReportRuns(const std::vector& reports); + + protected: + virtual void PrintRunData(const Run& report); + virtual void PrintHeader(const Run& report); + + OutputOptions output_options_; + size_t name_field_width_; + UserCounters prev_counters_; + bool printed_header_; +}; + +class JSONReporter : public BenchmarkReporter { + public: + JSONReporter() : first_report_(true) {} + virtual bool ReportContext(const Context& context); + virtual void ReportRuns(const std::vector& reports); + virtual void Finalize(); + + private: + void PrintRunData(const Run& report); + + bool first_report_; +}; + +class CSVReporter : public BenchmarkReporter { + public: + CSVReporter() : printed_header_(false) {} + virtual bool ReportContext(const Context& context); + virtual void ReportRuns(const std::vector& reports); + + private: + void PrintRunData(const Run& report); + + bool printed_header_; + std::set< std::string > user_counter_names_; +}; + +inline const char* GetTimeUnitString(TimeUnit unit) { + switch (unit) { + case kMillisecond: + return "ms"; + case kMicrosecond: + return "us"; + case kNanosecond: + default: + return "ns"; + } +} + +inline double GetTimeUnitMultiplier(TimeUnit unit) { + switch (unit) { + case kMillisecond: + return 1e3; + case kMicrosecond: + return 1e6; + case kNanosecond: + default: + return 1e9; + } +} + +} // namespace benchmark + +#endif // BENCHMARK_BENCHMARK_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h +++ MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h @@ -11,11 +11,17 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
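// Editorial sketch of a custom reporter built on the reporter interface
// above (the `ErrorLoggingReporter` name is hypothetical; only members
// declared in that interface are used). Pass an instance to
// RunSpecifiedBenchmarks() to replace the default console output:
//
//   class ErrorLoggingReporter : public benchmark::ConsoleReporter {
//    public:
//     virtual void ReportRuns(const std::vector<Run>& reports) {
//       for (std::vector<Run>::const_iterator it = reports.begin();
//            it != reports.end(); ++it) {
//         if (it->error_occurred)
//           GetErrorStream() << it->benchmark_name << ": "
//                            << it->error_message << "\n";
//       }
//       ConsoleReporter::ReportRuns(reports);  // keep the normal table output
//     }
//   };
//
//   // usage: ErrorLoggingReporter rep; benchmark::RunSpecifiedBenchmarks(&rep);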
-#ifndef BENCHMARK_BENCHMARK_H_ -#define BENCHMARK_BENCHMARK_H_ +#ifndef BENCHMARK_BENCHMARK_API_H_ +#define BENCHMARK_BENCHMARK_API_H_ -#include "benchmark_api.h" -#include "macros.h" -#include "reporter.h" +#ifdef __DEPRECATED +# ifndef BENCHMARK_WARNING_MSG +# warning the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead +# else + BENCHMARK_WARNING_MSG("the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead") +# endif +#endif -#endif // BENCHMARK_BENCHMARK_H_ +#include "benchmark.h" // For forward declaration of BenchmarkReporter + +#endif // BENCHMARK_BENCHMARK_API_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h +++ MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h @@ -11,11 +11,17 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#ifndef BENCHMARK_BENCHMARK_H_ -#define BENCHMARK_BENCHMARK_H_ +#ifndef BENCHMARK_REPORTER_H_ +#define BENCHMARK_REPORTER_H_ -#include "benchmark_api.h" -#include "macros.h" -#include "reporter.h" +#ifdef __DEPRECATED +# ifndef BENCHMARK_WARNING_MSG +# warning the reporter.h header has been deprecated and will be removed, please include benchmark.h instead +# else + BENCHMARK_WARNING_MSG("the reporter.h header has been deprecated and will be removed, please include benchmark.h instead") +# endif +#endif -#endif // BENCHMARK_BENCHMARK_H_ +#include "benchmark.h" // For forward declaration of BenchmarkReporter + +#endif // BENCHMARK_REPORTER_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/CMakeLists.txt @@ -0,0 +1,78 @@ +# Allow the source files to find headers in src/ +include_directories(${PROJECT_SOURCE_DIR}/src) + +if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) + list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) + list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) +endif() + +file(GLOB + SOURCE_FILES + *.cc + ${PROJECT_SOURCE_DIR}/include/benchmark/*.h + ${CMAKE_CURRENT_SOURCE_DIR}/*.h) + +add_library(benchmark ${SOURCE_FILES}) +set_target_properties(benchmark PROPERTIES + OUTPUT_NAME "benchmark" + VERSION ${GENERIC_LIB_VERSION} + SOVERSION ${GENERIC_LIB_SOVERSION} +) +target_include_directories(benchmark PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include> + ) + +# Link threads.
+target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) +find_library(LIBRT rt) +if(LIBRT) + target_link_libraries(benchmark ${LIBRT}) +endif() + +# We need extra libraries on Windows +if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") + target_link_libraries(benchmark Shlwapi) +endif() + +set(include_install_dir "include") +set(lib_install_dir "lib/") +set(bin_install_dir "bin/") +set(config_install_dir "lib/cmake/${PROJECT_NAME}") + +set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") + +set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") +set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") +set(targets_export_name "${PROJECT_NAME}Targets") + +set(namespace "${PROJECT_NAME}::") + +include(CMakePackageConfigHelpers) +write_basic_package_version_file( + "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion +) + +configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) + +# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) +install( + TARGETS benchmark + EXPORT ${targets_export_name} + ARCHIVE DESTINATION ${lib_install_dir} + LIBRARY DESTINATION ${lib_install_dir} + RUNTIME DESTINATION ${bin_install_dir} + INCLUDES DESTINATION ${include_install_dir}) + +install( + DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" + DESTINATION ${include_install_dir} + FILES_MATCHING PATTERN "*.*h") + +install( + FILES "${project_config}" "${version_config}" + DESTINATION "${config_install_dir}") + +install( + EXPORT "${targets_export_name}" + NAMESPACE "${namespace}" + DESTINATION "${config_install_dir}") Index: MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark.cc @@ -37,6 +37,7 @@ #include "colorprint.h" #include "commandlineflags.h" #include "complexity.h" +#include "counter.h" #include "log.h" #include "mutex.h" #include "re.h" @@ -90,6 +91,11 @@ "environment variable is set to a terminal type that supports " "colors."); +DEFINE_bool(benchmark_counters_tabular, false, + "Whether to use tabular format when printing user counters to " + "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0." + "Defaults to false."); + DEFINE_int32(v, 0, "The level of verbose logging to output"); namespace benchmark { @@ -145,6 +151,7 @@ std::string report_label_; std::string error_message_; bool has_error_ = false; + UserCounters counters; }; GUARDED_BY(GetBenchmarkMutex()) Result results; @@ -249,6 +256,8 @@ report.complexity_n = results.complexity_n; report.complexity = b.complexity; report.complexity_lambda = b.complexity_lambda; + report.counters = results.counters; + internal::Finish(&report.counters, seconds, b.threads); } return report; } @@ -272,6 +281,7 @@ results.bytes_processed += st.bytes_processed(); results.items_processed += st.items_processed(); results.complexity_n += st.complexity_length_n(); + internal::Increment(&results.counters, st.counters); } manager->NotifyThreadComplete(); } @@ -281,7 +291,8 @@ std::vector* complexity_reports) { std::vector reports; // return value - size_t iters = 1; + const bool has_explicit_iteration_count = b.iterations != 0; + size_t iters = has_explicit_iteration_count ? b.iterations : 1; std::unique_ptr manager; std::vector pool(b.threads - 1); const int repeats = @@ -291,7 +302,7 @@ (b.report_mode == internal::RM_Unspecified ? 
FLAGS_benchmark_report_aggregates_only : b.report_mode == internal::RM_ReportAggregatesOnly); - for (int i = 0; i < repeats; i++) { + for (int repetition_num = 0; repetition_num < repeats; repetition_num++) { for (;;) { // Try benchmark VLOG(2) << "Running " << b.name << " for " << iters << "\n"; @@ -327,10 +338,20 @@ const double min_time = !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time; - // If this was the first run, was elapsed time or cpu time large enough? - // If this is not the first run, go with the current value of iter. - if ((i > 0) || results.has_error_ || (iters >= kMaxIterations) || - (seconds >= min_time) || (results.real_time_used >= 5 * min_time)) { + + // Determine if this run should be reported; either it has + // run for a sufficient amount of time or an error was reported. + const bool should_report = repetition_num > 0 + || has_explicit_iteration_count // An exact iteration count was requested + || results.has_error_ + || iters >= kMaxIterations + || seconds >= min_time // the elapsed time is large enough + // CPU time is specified but the elapsed real time greatly exceeds the + // minimum time. Note that user-provided timers are exempt from this + // sanity check. + || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time); + + if (should_report) { BenchmarkReporter::Run report = CreateRunReport(b, results, iters, seconds); if (!report.error_occurred && b.complexity != oNone) @@ -386,6 +407,7 @@ items_processed_(0), complexity_n_(0), error_occurred_(false), + counters(), thread_index(thread_i), threads(n_threads), max_iterations(max_iters), @@ -505,10 +527,10 @@ } std::unique_ptr<BenchmarkReporter> CreateReporter( - std::string const& name, ConsoleReporter::OutputOptions allow_color) { + std::string const& name, ConsoleReporter::OutputOptions output_opts) { typedef std::unique_ptr<BenchmarkReporter> PtrType; if (name == "console") { - return PtrType(new ConsoleReporter(allow_color)); + return PtrType(new ConsoleReporter(output_opts)); } else if (name == "json") { return PtrType(new JSONReporter); } else if (name == "csv") { @@ -520,6 +542,30 @@ } } // end namespace + +bool IsZero(double n) { + return std::abs(n) < std::numeric_limits<double>::epsilon(); +} + +ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { + int output_opts = ConsoleReporter::OO_Defaults; + if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) || + IsTruthyFlagValue(FLAGS_benchmark_color)) { + output_opts |= ConsoleReporter::OO_Color; + } else { + output_opts &= ~ConsoleReporter::OO_Color; + } + if(force_no_color) { + output_opts &= ~ConsoleReporter::OO_Color; + } + if(FLAGS_benchmark_counters_tabular) { + output_opts |= ConsoleReporter::OO_Tabular; + } else { + output_opts &= ~ConsoleReporter::OO_Tabular; + } + return static_cast< ConsoleReporter::OutputOptions >(output_opts); +} + } // end namespace internal size_t RunSpecifiedBenchmarks() { @@ -541,29 +587,21 @@ std::unique_ptr<BenchmarkReporter> default_console_reporter; std::unique_ptr<BenchmarkReporter> default_file_reporter; if (!console_reporter) { - auto output_opts = ConsoleReporter::OO_None; - if (FLAGS_benchmark_color == "auto") - output_opts = IsColorTerminal() ? ConsoleReporter::OO_Color - : ConsoleReporter::OO_None; - else - output_opts = IsTruthyFlagValue(FLAGS_benchmark_color) - ?
ConsoleReporter::OO_Color - : ConsoleReporter::OO_None; - default_console_reporter = - internal::CreateReporter(FLAGS_benchmark_format, output_opts); + default_console_reporter = internal::CreateReporter( + FLAGS_benchmark_format, internal::GetOutputOptions()); console_reporter = default_console_reporter.get(); } auto& Out = console_reporter->GetOutputStream(); auto& Err = console_reporter->GetErrorStream(); std::string const& fname = FLAGS_benchmark_out; - if (fname == "" && file_reporter) { + if (fname.empty() && file_reporter) { Err << "A custom file reporter was provided but " "--benchmark_out=<file> was not specified." << std::endl; std::exit(1); } - if (fname != "") { + if (!fname.empty()) { output_file.open(fname); if (!output_file.is_open()) { Err << "invalid file name: '" << fname << "'" << std::endl; @@ -609,6 +647,7 @@ " [--benchmark_out=<filename>]\n" " [--benchmark_out_format=<json|console|csv>]\n" " [--benchmark_color={auto|true|false}]\n" + " [--benchmark_counters_tabular={true|false}]\n" " [--v=<verbosity>]\n"); exit(0); } @@ -633,8 +672,10 @@ // "color_print" is the deprecated name for "benchmark_color". // TODO: Remove this. ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) || + ParseBoolFlag(argv[i], "benchmark_counters_tabular", + &FLAGS_benchmark_counters_tabular) || ParseInt32Flag(argv[i], "v", &FLAGS_v)) { - for (int j = i; j != *argc; ++j) argv[j] = argv[j + 1]; + for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; --(*argc); --i; @@ -664,4 +705,11 @@ internal::LogLevel() = FLAGS_v; } +bool ReportUnrecognizedArguments(int argc, char** argv) { + for (int i = 1; i < argc; ++i) { + fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]); + } + return argc > 1; +} + } // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_api_internal.h @@ -1,7 +1,7 @@ #ifndef BENCHMARK_API_INTERNAL_H #define BENCHMARK_API_INTERNAL_H -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #include #include @@ -24,9 +24,11 @@ bool use_manual_time; BigO complexity; BigOFunc* complexity_lambda; + UserCounters counters; bool last_benchmark_instance; int repetitions; double min_time; + size_t iterations; int threads; // Number of concurrent threads to use }; @@ -34,13 +36,10 @@ std::vector<Benchmark::Instance>* benchmarks, std::ostream* Err); -namespace { +bool IsZero(double n); -bool IsZero(double n) { - return std::abs(n) < std::numeric_limits<double>::epsilon(); -} +ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); -} // end namespace } // end namespace internal } // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/benchmark_register.cc @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "check.h" @@ -69,6 +70,9 @@ // Registers a benchmark family and returns the index assigned to it. size_t AddBenchmark(std::unique_ptr<Benchmark> family); + // Clear all registered benchmark families. + void ClearBenchmarks(); + // Extract the list of benchmark instances that match the specified // regular expression.
bool FindBenchmarks(const std::string& re, @@ -94,6 +98,12 @@ return index; } +void BenchmarkFamilies::ClearBenchmarks() { + MutexLock l(mutex_); + families_.clear(); + families_.shrink_to_fit(); +} + bool BenchmarkFamilies::FindBenchmarks( const std::string& spec, std::vector* benchmarks, std::ostream* ErrStream) { @@ -143,6 +153,7 @@ instance.time_unit = family->time_unit_; instance.range_multiplier = family->range_multiplier_; instance.min_time = family->min_time_; + instance.iterations = family->iterations_; instance.repetitions = family->repetitions_; instance.use_real_time = family->use_real_time_; instance.use_manual_time = family->use_manual_time_; @@ -162,17 +173,18 @@ StringPrintF("%s:", family->arg_names_[arg_i].c_str()); } } - - AppendHumanReadable(arg, &instance.name); + + instance.name += StringPrintF("%d", arg); ++arg_i; } - if (!IsZero(family->min_time_)) { + if (!IsZero(family->min_time_)) instance.name += StringPrintF("/min_time:%0.3f", family->min_time_); - } - if (family->repetitions_ != 0) { + if (family->iterations_ != 0) + instance.name += StringPrintF("/iterations:%d", family->iterations_); + if (family->repetitions_ != 0) instance.name += StringPrintF("/repeats:%d", family->repetitions_); - } + if (family->use_manual_time_) { instance.name += "/manual_time"; } else if (family->use_real_time_) { @@ -219,6 +231,7 @@ time_unit_(kNanosecond), range_multiplier_(kRangeMultiplier), min_time_(0), + iterations_(0), repetitions_(0), use_real_time_(false), use_manual_time_(false), @@ -344,6 +357,22 @@ return this; } + +Benchmark* Benchmark::MinTime(double t) { + CHECK(t > 0.0); + CHECK(iterations_ == 0); + min_time_ = t; + return this; +} + + +Benchmark* Benchmark::Iterations(size_t n) { + CHECK(n > 0); + CHECK(IsZero(min_time_)); + iterations_ = n; + return this; +} + Benchmark* Benchmark::Repetitions(int n) { CHECK(n > 0); repetitions_ = n; @@ -355,12 +384,6 @@ return this; } -Benchmark* Benchmark::MinTime(double t) { - CHECK(t > 0.0); - min_time_ = t; - return this; -} - Benchmark* Benchmark::UseRealTime() { CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously."; @@ -436,4 +459,9 @@ void FunctionBenchmark::Run(State& st) { func_(st); } } // end namespace internal + +void ClearRegisteredBenchmarks() { + internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks(); +} + } // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/check.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/check.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/check.h @@ -3,6 +3,7 @@ #include #include +#include #include "internal_macros.h" #include "log.h" @@ -68,4 +69,11 @@ #define CHECK_GT(a, b) CHECK((a) > (b)) #define CHECK_LT(a, b) CHECK((a) < (b)) +#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) +#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) +#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) +#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) +#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) +#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) + #endif // CHECK_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc @@ -89,7 +89,7 @@ std::size_t size = 256; char local_buff[256]; - auto ret = 
std::vsnprintf(local_buff, size, msg, args_cp); + auto ret = vsnprintf(local_buff, size, msg, args_cp); va_end(args_cp); @@ -104,7 +104,7 @@ // we did not provide a long enough buffer on our first attempt. size = (size_t)ret + 1; // + 1 for the null byte std::unique_ptr buff(new char[size]); - ret = std::vsnprintf(buff.get(), size, msg, args); + ret = vsnprintf(buff.get(), size, msg, args); CHECK(ret > 0 && ((size_t)ret) < size); return buff.get(); } Index: MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc @@ -209,9 +209,9 @@ return (ParseFlagValue(str, flag, true) != nullptr); } -bool IsTruthyFlagValue(const std::string& str) { - if (str.empty()) return true; - char ch = str[0]; +bool IsTruthyFlagValue(const std::string& value) { + if (value.empty()) return true; + char ch = value[0]; return isalnum(ch) && !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N'); } Index: MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.h @@ -21,8 +21,7 @@ #include #include -#include "benchmark/benchmark_api.h" -#include "benchmark/reporter.h" +#include "benchmark/benchmark.h" namespace benchmark { Index: MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/complexity.cc @@ -15,7 +15,7 @@ // Source project : https://github.com/ismaelJimenez/cpp.leastsq // Adapted to be used with google benchmark -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #include #include @@ -35,9 +35,9 @@ case oNCubed: return [](int n) -> double { return std::pow(n, 3); }; case oLogN: - return [](int n) { return std::log2(n); }; + return [](int n) { return log2(n); }; case oNLogN: - return [](int n) { return n * std::log2(n); }; + return [](int n) { return n * log2(n); }; case o1: default: return [](int) { return 1.0; }; @@ -171,6 +171,22 @@ // All repetitions should be run with the same number of iterations so we // can take this information from the first benchmark. int64_t const run_iterations = reports.front().iterations; + // create stats for user counters + struct CounterStat { + Counter c; + Stat1_d s; + }; + std::map< std::string, CounterStat > counter_stats; + for(Run const& r : reports) { + for(auto const& cnt : r.counters) { + auto it = counter_stats.find(cnt.first); + if(it == counter_stats.end()) { + counter_stats.insert({cnt.first, {cnt.second, Stat1_d{}}}); + } else { + CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); + } + } + } // Populate the accumulators. 
for (Run const& run : reports) { @@ -178,11 +194,17 @@ CHECK_EQ(run_iterations, run.iterations); if (run.error_occurred) continue; real_accumulated_time_stat += - Stat1_d(run.real_accumulated_time / run.iterations, run.iterations); + Stat1_d(run.real_accumulated_time / run.iterations); cpu_accumulated_time_stat += - Stat1_d(run.cpu_accumulated_time / run.iterations, run.iterations); - items_per_second_stat += Stat1_d(run.items_per_second, run.iterations); - bytes_per_second_stat += Stat1_d(run.bytes_per_second, run.iterations); + Stat1_d(run.cpu_accumulated_time / run.iterations); + items_per_second_stat += Stat1_d(run.items_per_second); + bytes_per_second_stat += Stat1_d(run.bytes_per_second); + // user counters + for(auto const& cnt : run.counters) { + auto it = counter_stats.find(cnt.first); + CHECK_NE(it, counter_stats.end()); + it->second.s += Stat1_d(cnt.second); + } } // Get the data from the accumulator to BenchmarkReporter::Run's. @@ -196,6 +218,11 @@ mean_data.bytes_per_second = bytes_per_second_stat.Mean(); mean_data.items_per_second = items_per_second_stat.Mean(); mean_data.time_unit = reports[0].time_unit; + // user counters + for(auto const& kv : counter_stats) { + auto c = Counter(kv.second.s.Mean(), counter_stats[kv.first].c.flags); + mean_data.counters[kv.first] = c; + } // Only add label to mean/stddev if it is same for all runs mean_data.report_label = reports[0].report_label; @@ -215,6 +242,11 @@ stddev_data.bytes_per_second = bytes_per_second_stat.StdDev(); stddev_data.items_per_second = items_per_second_stat.StdDev(); stddev_data.time_unit = reports[0].time_unit; + // user counters + for(auto const& kv : counter_stats) { + auto c = Counter(kv.second.s.StdDev(), counter_stats[kv.first].c.flags); + stddev_data.counters[kv.first] = c; + } results.push_back(mean_data); results.push_back(stddev_data); @@ -263,6 +295,11 @@ big_o.report_big_o = true; big_o.complexity = result_cpu.complexity; + // All the time results are reported after being multiplied by the + // time unit multiplier. But since RMS is a relative quantity it + // should not be multiplied at all. So, here, we _divide_ it by the + // multiplier so that when it is multiplied later the result is the + // correct one. double multiplier = GetTimeUnitMultiplier(reports[0].time_unit); // Only add label to mean/stddev if it is same for all runs @@ -275,6 +312,9 @@ rms.cpu_accumulated_time = result_cpu.rms / multiplier; rms.report_rms = true; rms.complexity = result_cpu.complexity; + // don't forget to keep the time unit, or we won't be able to + // recover the correct value. + rms.time_unit = reports[0].time_unit; results.push_back(big_o); results.push_back(rms); Index: MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/console_reporter.cc @@ -12,8 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
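The counter-statistics hunks above give each user counter the same mean/stddev treatment as the built-in timings, using the Bessel-corrected sample standard deviation that the src/stat.h hunk later in this patch introduces. The arithmetic, reduced to a self-contained sketch (names invented; this is not the library's code):

#include <cmath>

// One accumulator per counter name, keyed by name; Mean()/StdDev()
// feed the synthetic mean/stddev report entries.
struct MeanStd {
  double sum = 0.0, sum_sq = 0.0;
  int n = 0;
  void Add(double v) { sum += v; sum_sq += v * v; ++n; }
  double Mean() const { return n ? sum / n : 0.0; }
  double StdDev() const {
    if (n < 2) return 0.0;  // sample stddev is undefined for one sample
    const double m = Mean();
    return std::sqrt(n / (n - 1.0) * (sum_sq / n - m * m));
  }
};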
-#include "benchmark/reporter.h" +#include "benchmark/benchmark.h" #include "complexity.h" +#include "counter.h" #include #include @@ -34,27 +35,59 @@ bool ConsoleReporter::ReportContext(const Context& context) { name_field_width_ = context.name_field_width; + printed_header_ = false; + prev_counters_.clear(); PrintBasicContext(&GetErrorStream(), context); #ifdef BENCHMARK_OS_WINDOWS - if (color_output_ && &std::cout != &GetOutputStream()) { + if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream()) { GetErrorStream() << "Color printing is only supported for stdout on windows." " Disabling color printing\n"; - color_output_ = false; + output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); } #endif - std::string str = - FormatString("%-*s %13s %13s %10s\n", static_cast(name_field_width_), - "Benchmark", "Time", "CPU", "Iterations"); - GetOutputStream() << str << std::string(str.length() - 1, '-') << "\n"; return true; } +void ConsoleReporter::PrintHeader(const Run& run) { + std::string str = FormatString("%-*s %13s %13s %10s", static_cast(name_field_width_), + "Benchmark", "Time", "CPU", "Iterations"); + if(!run.counters.empty()) { + if(output_options_ & OO_Tabular) { + for(auto const& c : run.counters) { + str += FormatString(" %10s", c.first.c_str()); + } + } else { + str += " UserCounters..."; + } + } + str += "\n"; + std::string line = std::string(str.length(), '-'); + GetOutputStream() << line << "\n" << str << line << "\n"; +} + void ConsoleReporter::ReportRuns(const std::vector& reports) { - for (const auto& run : reports) PrintRunData(run); + for (const auto& run : reports) { + // print the header: + // --- if none was printed yet + bool print_header = !printed_header_; + // --- or if the format is tabular and this run + // has different fields from the prev header + print_header |= (output_options_ & OO_Tabular) && + (!internal::SameNames(run.counters, prev_counters_)); + if (print_header) { + printed_header_ = true; + prev_counters_ = run.counters; + PrintHeader(run); + } + // As an alternative to printing the headers like this, we could sort + // the benchmarks by header and then print. But this would require + // waiting for the full results before printing, or printing twice. + PrintRunData(run); + } } static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, @@ -68,8 +101,8 @@ void ConsoleReporter::PrintRunData(const Run& result) { typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); auto& Out = GetOutputStream(); - PrinterFn* printer = - color_output_ ? (PrinterFn*)ColorPrintf : IgnoreColorPrint; + PrinterFn* printer = (output_options_ & OO_Color) ? + (PrinterFn*)ColorPrintf : IgnoreColorPrint; auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN; printer(Out, name_color, "%-*s ", name_field_width_, @@ -114,6 +147,21 @@ printer(Out, COLOR_CYAN, "%10lld", result.iterations); } + for (auto& c : result.counters) { + auto const& s = HumanReadableNumber(c.second.value); + if (output_options_ & OO_Tabular) { + if (c.second.flags & Counter::kIsRate) { + printer(Out, COLOR_DEFAULT, " %8s/s", s.c_str()); + } else { + printer(Out, COLOR_DEFAULT, " %10s", s.c_str()); + } + } else { + const char* unit = (c.second.flags & Counter::kIsRate) ? 
"/s" : ""; + printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), + unit); + } + } + if (!rate.empty()) { printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str()); } Index: MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h @@ -11,11 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#ifndef BENCHMARK_BENCHMARK_H_ -#define BENCHMARK_BENCHMARK_H_ -#include "benchmark_api.h" -#include "macros.h" -#include "reporter.h" +#include "benchmark/benchmark.h" -#endif // BENCHMARK_BENCHMARK_H_ +namespace benchmark { + +// these counter-related functions are hidden to reduce API surface. +namespace internal { +void Finish(UserCounters *l, double time, double num_threads); +void Increment(UserCounters *l, UserCounters const& r); +bool SameNames(UserCounters const& l, UserCounters const& r); +} // end namespace internal + +} //end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "counter.h" + +namespace benchmark { +namespace internal { + +double Finish(Counter const& c, double cpu_time, double num_threads) { + double v = c.value; + if (c.flags & Counter::kIsRate) { + v /= cpu_time; + } + if (c.flags & Counter::kAvgThreads) { + v /= num_threads; + } + return v; +} + +void Finish(UserCounters *l, double cpu_time, double num_threads) { + for (auto &c : *l) { + c.second.value = Finish(c.second, cpu_time, num_threads); + } +} + +void Increment(UserCounters *l, UserCounters const& r) { + // add counters present in both or just in *l + for (auto &c : *l) { + auto it = r.find(c.first); + if (it != r.end()) { + c.second.value = c.second + it->second; + } + } + // add counters present in r, but not in *l + for (auto const &tc : r) { + auto it = l->find(tc.first); + if (it == l->end()) { + (*l)[tc.first] = tc.second; + } + } +} + +bool SameNames(UserCounters const& l, UserCounters const& r) { + if (&l == &r) return true; + if (l.size() != r.size()) { + return false; + } + for (auto const& c : l) { + if (r.find(c.first) == r.end()) { + return false; + } + } + return true; +} + +} // end namespace internal +} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "benchmark/reporter.h" +#include "benchmark/benchmark.h" #include "complexity.h" #include @@ -24,6 +24,7 @@ #include "string_util.h" #include "timers.h" +#include "check.h" // File format reference: http://edoceo.com/utilitas/csv-file-format. @@ -34,25 +35,55 @@ "name", "iterations", "real_time", "cpu_time", "time_unit", "bytes_per_second", "items_per_second", "label", "error_occurred", "error_message"}; -} +} // namespace bool CSVReporter::ReportContext(const Context& context) { PrintBasicContext(&GetErrorStream(), context); + return true; +} +void CSVReporter::ReportRuns(const std::vector<Run> & reports) { std::ostream& Out = GetOutputStream(); - for (auto B = elements.begin(); B != elements.end();) { - Out << *B++; - if (B != elements.end()) Out << ","; + + if (!printed_header_) { + // save the names of all the user counters + for (const auto& run : reports) { + for (const auto& cnt : run.counters) { + user_counter_names_.insert(cnt.first); + } + } + + // print the header + for (auto B = elements.begin(); B != elements.end();) { + Out << *B++; + if (B != elements.end()) Out << ","; + } + for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) { + Out << ",\"" << *B++ << "\""; + } + Out << "\n"; + + printed_header_ = true; + } else { + // check that all the current counters are saved in the name set + for (const auto& run : reports) { + for (const auto& cnt : run.counters) { + CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) + << "All counters must be present in each run. " + << "Counter named \"" << cnt.first + << "\" was not in a run after being added to the header"; + } + } + } + + // print results for each run + for (const auto& run : reports) { + PrintRunData(run); } - Out << "\n"; - return true; -} -void CSVReporter::ReportRuns(const std::vector<Run>& reports) { - for (const auto& run : reports) PrintRunData(run); } -void CSVReporter::PrintRunData(const Run& run) { +void CSVReporter::PrintRunData(const Run & run) { std::ostream& Out = GetOutputStream(); // Field with embedded double-quote characters must be doubled and the field @@ -102,6 +133,16 @@ Out << "\"" << label << "\""; } Out << ",,"; // for error_occurred and error_message + + // Print user counters + for (const auto &ucn : user_counter_names_) { + auto it = run.counters.find(ucn); + if(it == run.counters.end()) { + Out << ","; + } else { + Out << "," << it->second; + } + } Out << '\n'; } Index: MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h @@ -23,6 +23,7 @@ #include -#include "benchmark/macros.h" +#include "benchmark/benchmark.h" #include "internal_macros.h" #if defined(BENCHMARK_OS_MACOSX) @@ -43,6 +43,11 @@ #ifndef BENCHMARK_OS_WINDOWS #include +#include <time.h> +#endif + +#ifdef BENCHMARK_OS_EMSCRIPTEN +#include <emscripten.h> #endif namespace benchmark { @@ -65,6 +70,10 @@ // counter pauses; it does not continue counting, nor does it // reset to zero. return mach_absolute_time(); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // this goes above x86-specific code because old versions of Emscripten + // define __x86_64__, although they have nothing to do with it. + return static_cast<int64_t>(emscripten_get_now() * 1e+6); #elif defined(__i386__) int64_t ret; __asm__ volatile("rdtsc" : "=A"(ret)); @@ -79,7 +88,7 @@ asm("mftbu %0" : "=r"(tbu0)); asm("mftb %0" : "=r"(tbl)); asm("mftbu %0" : "=r"(tbu1)); - tbl &= -static_cast(tbu0 == tbu1); + tbl &= -static_cast(tbu0 == tbu1); // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage) return (tbu1 << 32) | tbl; #elif defined(__sparc__) @@ -99,6 +108,22 @@ _asm rdtsc #elif defined(COMPILER_MSVC) return __rdtsc(); +#elif defined(BENCHMARK_OS_NACL) + // Native Client validator on x86/x86-64 allows RDTSC instructions, + // and this case is handled above. Native Client validator on ARM + // rejects MRC instructions (used in the ARM-specific sequence below), + // so we handle it here. Portable Native Client compiles to + // architecture-agnostic bytecode, which doesn't provide any + // cycle counter access mnemonics. + + // Native Client does not provide any API to access cycle counter. + // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday + // because it provides nanosecond resolution (which is noticeable at + // least for PNaCl modules running on x86 Mac & Linux). + // Initialize to always return 0 if clock_gettime fails. + struct timespec ts = { 0, 0 }; + clock_gettime(CLOCK_MONOTONIC, &ts); + return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec; #elif defined(__aarch64__) // System timer of ARMv8 runs at a different frequency than the CPU's. // The frequency is fixed, typically in the range 1-50MHz.
It can be @@ -108,7 +133,9 @@ asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); return virtual_timer_value; #elif defined(__ARM_ARCH) -#if (__ARM_ARCH >= 6) // V6 is the earliest arch that has a standard cyclecount + // V6 is the earliest arch that has a standard cyclecount + // Native Client validator doesn't allow MRC instructions. +#if (__ARM_ARCH >= 6) uint32_t pmccntr; uint32_t pmuseren; uint32_t pmcntenset; Index: MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h @@ -1,7 +1,7 @@ #ifndef BENCHMARK_INTERNAL_MACROS_H_ #define BENCHMARK_INTERNAL_MACROS_H_ -#include "benchmark/macros.h" +#include "benchmark/benchmark.h" #ifndef __has_feature #define __has_feature(x) 0 @@ -30,13 +30,28 @@ #elif defined(_WIN32) #define BENCHMARK_OS_WINDOWS 1 #elif defined(__APPLE__) -// TODO(ericwf) This doesn't actually check that it is a Mac OSX system. Just -// that it is an apple system. -#define BENCHMARK_OS_MACOSX 1 +#include "TargetConditionals.h" + #if defined(TARGET_OS_MAC) + #define BENCHMARK_OS_MACOSX 1 + #if defined(TARGET_OS_IPHONE) + #define BENCHMARK_OS_IOS 1 + #endif + #endif #elif defined(__FreeBSD__) #define BENCHMARK_OS_FREEBSD 1 #elif defined(__linux__) #define BENCHMARK_OS_LINUX 1 +#elif defined(__native_client__) +#define BENCHMARK_OS_NACL 1 +#elif defined(EMSCRIPTEN) +#define BENCHMARK_OS_EMSCRIPTEN 1 +#elif defined(__rtems__) +#define BENCHMARK_OS_RTEMS 1 +#endif + +#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ + && !defined(__EXCEPTIONS) +#define BENCHMARK_HAS_NO_EXCEPTIONS #endif #endif // BENCHMARK_INTERNAL_MACROS_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/json_reporter.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
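Stepping back to the counter plumbing added in counter.cc above: on the user-facing side the new UserCounters map is filled through State::counters, and internal::Finish() post-processes each value according to its flags. A sketch of typical usage (the benchmark body and numbers are invented):

#include "benchmark/benchmark.h"

static void BM_Copy(benchmark::State& state) {
  double bytes = 0;
  while (state.KeepRunning()) {
    bytes += 1024;  // stand-in for real work
  }
  // kIsRate counters are divided by elapsed time in internal::Finish();
  // kAvgThreads counters are divided by the thread count instead.
  state.counters["Bytes"] =
      benchmark::Counter(bytes, benchmark::Counter::kIsRate);
}
BENCHMARK(BM_Copy);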
-#include "benchmark/reporter.h" +#include "benchmark/benchmark.h" #include "complexity.h" #include @@ -154,10 +154,15 @@ << indent << FormatKV("items_per_second", RoundDouble(run.items_per_second)); } + for(auto &c : run.counters) { + out << ",\n" + << indent + << FormatKV(c.first, RoundDouble(c.second)); + } if (!run.report_label.empty()) { out << ",\n" << indent << FormatKV("label", run.report_label); } out << '\n'; } -} // end namespace benchmark +} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/log.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/log.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/log.h @@ -4,7 +4,7 @@ #include #include -#include "benchmark/macros.h" +#include "benchmark/benchmark.h" namespace benchmark { namespace internal { @@ -70,4 +70,4 @@ (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ " ") -#endif \ No newline at end of file +#endif Index: MicroBenchmarks/libs/benchmark-1.2.0/src/re.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/re.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/re.h @@ -15,6 +15,15 @@ #ifndef BENCHMARK_RE_H_ #define BENCHMARK_RE_H_ +#include "internal_macros.h" + +// Prefer C regex libraries when compiling w/o exceptions so that we can +// correctly report errors. +#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \ + (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) +#undef HAVE_STD_REGEX +#endif + #if defined(HAVE_STD_REGEX) #include #elif defined(HAVE_GNU_POSIX_REGEX) @@ -62,15 +71,20 @@ #if defined(HAVE_STD_REGEX) inline bool Regex::Init(const std::string& spec, std::string* error) { +#ifdef BENCHMARK_HAS_NO_EXCEPTIONS + ((void)error); // suppress unused warning +#else try { +#endif re_ = std::regex(spec, std::regex_constants::extended); - init_ = true; +#ifndef BENCHMARK_HAS_NO_EXCEPTIONS } catch (const std::regex_error& e) { if (error) { *error = e.what(); } } +#endif return init_; } Index: MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/reporter.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "benchmark/reporter.h" +#include "benchmark/benchmark.h" #include "timers.h" #include @@ -31,10 +31,10 @@ BenchmarkReporter::~BenchmarkReporter() {} -void BenchmarkReporter::PrintBasicContext(std::ostream *out_ptr, +void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context) { - CHECK(out_ptr) << "cannot be null"; - auto &Out = *out_ptr; + CHECK(out) << "cannot be null"; + auto &Out = *out; Out << "Run on (" << context.num_cpus << " X " << context.mhz_per_cpu << " MHz CPU " << ((context.num_cpus > 1) ? 
"s" : "") << ")\n"; Index: MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h @@ -0,0 +1,15 @@ +#ifndef BENCHMARK_SLEEP_H_ +#define BENCHMARK_SLEEP_H_ + +namespace benchmark { +const int kNumMillisPerSecond = 1000; +const int kNumMicrosPerMilli = 1000; +const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; +const int kNumNanosPerMicro = 1000; +const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; + +void SleepForMilliseconds(int milliseconds); +void SleepForSeconds(double seconds); +} // end namespace benchmark + +#endif // BENCHMARK_SLEEP_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc @@ -15,6 +15,7 @@ #include "sleep.h" #include +#include #include #include "internal_macros.h" @@ -40,7 +41,7 @@ } void SleepForMilliseconds(int milliseconds) { - SleepForMicroseconds(static_cast(milliseconds) * kNumMicrosPerMilli); + SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); } void SleepForSeconds(double seconds) { Index: MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h +++ MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h @@ -119,18 +119,22 @@ if (numsamples_ == 0) return VType(); VType mean = sum_ * (1.0 / numsamples_); if (stddev) { - VType avg_squares = sum_squares_ * (1.0 / numsamples_); - *stddev = Sqrt(avg_squares - Sqr(mean)); + // Sample standard deviation is undefined for n = 1 + if (numsamples_ == 1) { + *stddev = VType(); + } else { + VType avg_squares = sum_squares_ * (1.0 / numsamples_); + *stddev = Sqrt(numsamples_ / (numsamples_ - 1.0) * (avg_squares - Sqr(mean))); + } } return mean; } // Return the standard deviation of the sample set VType StdDev() const { - if (numsamples_ == 0) return VType(); - VType mean = Mean(); - VType avg_squares = sum_squares_ * (1.0 / numsamples_); - return Sqrt(avg_squares - Sqr(mean)); + VType stddev = VType(); + Mean(&stddev); + return stddev; } private: Index: MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/string_util.cc @@ -45,6 +45,8 @@ std::max(thresh, 1.0 / std::pow(10.0, precision)); const double big_threshold = adjusted_threshold * one_k; const double small_threshold = adjusted_threshold; + // Values in ]simple_threshold,small_threshold[ will be printed as-is + const double simple_threshold = 0.01; if (val > big_threshold) { // Positive powers @@ -62,14 +64,16 @@ *exponent = 0; } else if (val < small_threshold) { // Negative powers - double scaled = val; - for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { - scaled *= one_k; - if (scaled >= small_threshold) { - mantissa_stream << scaled; - *exponent = -static_cast(i + 1); - *mantissa = mantissa_stream.str(); - return; + if (val < simple_threshold) { + double scaled = val; + for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i) { + scaled *= one_k; + if (scaled >= small_threshold) { + mantissa_stream << scaled; + *exponent = -static_cast(i + 1); + *mantissa = mantissa_stream.str(); + return; + } } } mantissa_stream << val; Index: 
MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc @@ -75,7 +75,9 @@ char line[1024]; char* err; memset(line, '\0', sizeof(line)); - CHECK(read(fd, line, sizeof(line) - 1)); + ssize_t read_err = read(fd, line, sizeof(line) - 1); + ((void)read_err); // prevent unused warning + CHECK(read_err >= 0); const long temp_value = strtol(line, &err, 10); if (line[0] != '\0' && (*err == '\n' || *err == '\0')) { *value = temp_value; @@ -295,8 +297,13 @@ (size == sizeof(cpu_freq))) { cpuinfo_cycles_per_second = cpu_freq; } else { + #if defined BENCHMARK_OS_IOS + fprintf(stderr, "CPU frequency cannot be detected.\n"); + cpuinfo_cycles_per_second = 0; + #else fprintf(stderr, "%s\n", strerror(errno)); std::exit(EXIT_FAILURE); + #endif } #else // Generic cycles per second counter Index: MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc @@ -35,6 +35,10 @@ #endif #endif +#ifdef BENCHMARK_OS_EMSCRIPTEN +#include <emscripten.h> +#endif + #include #include #include @@ -100,14 +104,7 @@ } // end namespace double ProcessCPUUsage() { -// FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See -// https://github.com/google/benchmark/pull/292 -#if defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) - struct timespec spec; - if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) - return MakeTime(spec); - DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); -#elif defined(BENCHMARK_OS_WINDOWS) +#if defined(BENCHMARK_OS_WINDOWS) HANDLE proc = GetCurrentProcess(); FILETIME creation_time; FILETIME exit_time; @@ -117,21 +114,28 @@ &user_time)) return MakeTime(kernel_time, user_time); DiagnoseAndExit("GetProcessTimes() failed"); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. + // Use Emscripten-specific API. Reported CPU time would be exactly the + // same as total time, but this is ok because there aren't long-latency + // synchronous system calls in Emscripten. + return emscripten_get_now() * 1e-3; +#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) + // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See + // https://github.com/google/benchmark/pull/292 + struct timespec spec; + if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) + return MakeTime(spec); + DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); #else struct rusage ru; if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); - DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); + DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed"); #endif } double ThreadCPUUsage() { -// FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See -// https://github.com/google/benchmark/pull/292 -#if defined(CLOCK_THREAD_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) - struct timespec ts; - if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); - DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...)
failed"); -#elif defined(BENCHMARK_OS_WINDOWS) +#if defined(BENCHMARK_OS_WINDOWS) HANDLE this_thread = GetCurrentThread(); FILETIME creation_time; FILETIME exit_time; @@ -141,6 +145,8 @@ &user_time); return MakeTime(kernel_time, user_time); #elif defined(BENCHMARK_OS_MACOSX) + // FIXME We want to use clock_gettime, but it's not available in MacOS 10.11. See + // https://github.com/google/benchmark/pull/292 mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; thread_basic_info_data_t info; mach_port_t thread = pthread_mach_thread_np(pthread_self()); @@ -149,6 +155,17 @@ return MakeTime(info); } DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); +#elif defined(BENCHMARK_OS_EMSCRIPTEN) + // Emscripten doesn't support traditional threads + return ProcessCPUUsage(); +#elif defined(BENCHMARK_OS_RTEMS) + // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See + // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c + return ProcessCPUUsage(); +#elif defined(CLOCK_THREAD_CPUTIME_ID) + struct timespec ts; + if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); + DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); #else #error Per-thread timing is not available on your system. #endif } Index: MicroBenchmarks/libs/benchmark-1.2.0/test/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/CMakeLists.txt +++ MicroBenchmarks/libs/benchmark-1.2.0/test/CMakeLists.txt @@ -1,6 +1,26 @@ # Enable the tests find_package(Threads REQUIRED) +include(CheckCXXCompilerFlag) + +# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must +# strip -DNDEBUG from the default CMake flags in DEBUG mode. +string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE) +if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" ) + add_definitions( -UNDEBUG ) + add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) + # Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines. + foreach (flags_var_to_scrub + CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_RELWITHDEBINFO + CMAKE_CXX_FLAGS_MINSIZEREL + CMAKE_C_FLAGS_RELEASE + CMAKE_C_FLAGS_RELWITHDEBINFO + CMAKE_C_FLAGS_MINSIZEREL) + string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " " + "${flags_var_to_scrub}" "${${flags_var_to_scrub}}") + endforeach() +endif() # NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise # they will break the configuration check.
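The -UNDEBUG scrubbing above exists because several of these tests assert() on runtime behavior, and options_test.cc later in this patch re-enables assert() per translation unit as a second line of defense. That defensive pattern, as a small sketch (the check itself is invented, modeled on BM_explicit_iteration_count):

#if defined(NDEBUG)
#undef NDEBUG  // keep assert() active even in optimized builds
#endif
#include <cassert>

static void CheckExactIterations(long done) {
  assert(done == 42 && "explicit iteration count must be honored");
}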
@@ -8,7 +28,7 @@ list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}) endif() -add_library(output_test_helper STATIC output_test_helper.cc) +add_library(output_test_helper STATIC output_test_helper.cc output_test.h) macro(compile_benchmark_test name) add_executable(${name} "${name}.cc") @@ -56,6 +76,11 @@ add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01) compile_benchmark_test(donotoptimize_test) +# Some of the issues with DoNotOptimize only occur when optimization is enabled +check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) +if (BENCHMARK_HAS_O3_FLAG) + set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") +endif() add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01) compile_benchmark_test(fixture_test) @@ -73,6 +98,12 @@ compile_output_test(reporter_output_test) add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01) +compile_output_test(user_counters_test) +add_test(user_counters_test user_counters_test --benchmark_min_time=0.01) + +compile_output_test(user_counters_tabular_test) +add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) + check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) if (BENCHMARK_HAS_CXX03_FLAG) set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}") Index: MicroBenchmarks/libs/benchmark-1.2.0/test/basic_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/basic_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/basic_test.cc @@ -1,5 +1,5 @@ -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) Index: MicroBenchmarks/libs/benchmark-1.2.0/test/benchmark_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/benchmark_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/benchmark_test.cc @@ -150,7 +150,7 @@ BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28); static void BM_ParallelMemset(benchmark::State& state) { - int size = state.range(0) / sizeof(int); + int size = state.range(0) / static_cast(sizeof(int)); int thread_size = size / state.threads; int from = thread_size * state.thread_index; int to = from + thread_size; @@ -209,8 +209,7 @@ std::pair(42, 3.8)); void BM_non_template_args(benchmark::State& state, int, double) { - while (state.KeepRunning()) { - } + while(state.KeepRunning()) {} } BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); Index: MicroBenchmarks/libs/benchmark-1.2.0/test/complexity_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/complexity_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/complexity_test.cc @@ -141,7 +141,7 @@ BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) - ->Complexity([](int n) { return n * std::log2(n); }); + ->Complexity([](int n) { return n * log2(n); }); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) Index: MicroBenchmarks/libs/benchmark-1.2.0/test/cxx03_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/cxx03_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/cxx03_test.cc @@ -39,4 +39,10 @@ BENCHMARK_TEMPLATE(BM_template1, long); BENCHMARK_TEMPLATE1(BM_template1, int); +void BM_counters(benchmark::State& 
state) { + BM_empty(state); + state.counters["Foo"] = 2; +} +BENCHMARK(BM_counters); + BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.2.0/test/diagnostics_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/diagnostics_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/diagnostics_test.cc @@ -11,7 +11,7 @@ #include #include "../src/check.h" -#include "benchmark/benchmark_api.h" +#include "benchmark/benchmark.h" #if defined(__GNUC__) && !defined(__EXCEPTIONS) #define TEST_HAS_NO_EXCEPTIONS @@ -26,7 +26,7 @@ } void try_invalid_pause_resume(benchmark::State& state) { -#if !defined(NDEBUG) && !defined(TEST_HAS_NO_EXCEPTIONS) +#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS) try { state.PauseTiming(); std::abort(); Index: MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc @@ -9,6 +9,22 @@ std::uint64_t double_up(const std::uint64_t x) { return x * 2; } } +// Using DoNotOptimize on types like BitRef seems to cause a lot of problems +// with the inline assembly on both GCC and Clang. +struct BitRef { + int index; + unsigned char &byte; + +public: + static BitRef Make() { + static unsigned char arr[2] = {}; + BitRef b(1, arr[0]); + return b; + } +private: + BitRef(int i, unsigned char& b) : index(i), byte(b) {} +}; + int main(int, char*[]) { // this test verifies compilation of DoNotOptimize() for some types @@ -29,5 +45,8 @@ benchmark::DoNotOptimize(double_up(x)); - return 0; + // These tests are to ensure that DoNotOptimize also accepts types like BitRef. + benchmark::DoNotOptimize(BitRef::Make()); + BitRef lval = BitRef::Make(); + benchmark::DoNotOptimize(lval); } Index: MicroBenchmarks/libs/benchmark-1.2.0/test/options_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/options_test.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/options_test.cc @@ -1,8 +1,12 @@ -#include "benchmark/benchmark_api.h" - +#include "benchmark/benchmark.h" #include #include +#if defined(NDEBUG) +#undef NDEBUG +#endif +#include <cassert> + void BM_basic(benchmark::State& state) { while (state.KeepRunning()) { } } @@ -40,4 +44,22 @@ BENCHMARK(BM_basic)->Apply(CustomArgs); +void BM_explicit_iteration_count(benchmark::State& st) { + // Test that benchmarks specified with an explicit iteration count are + // only run once. + static bool invoked_before = false; + assert(!invoked_before); + invoked_before = true; + + // Test that the requested iteration count is respected. + assert(st.max_iterations == 42); + size_t actual_iterations = 0; + while (st.KeepRunning()) + ++actual_iterations; + assert(st.iterations() == st.max_iterations); + assert(st.iterations() == 42); + +} +BENCHMARK(BM_explicit_iteration_count)->Iterations(42); + BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h @@ -0,0 +1,201 @@ +#ifndef TEST_OUTPUT_TEST_H +#define TEST_OUTPUT_TEST_H + +#undef NDEBUG +#include +#include +#include +#include +#include +#include +#include + +#include "../src/re.h" +#include "benchmark/benchmark.h" + +#define CONCAT2(x, y) x##y +#define CONCAT(x, y) CONCAT2(x, y) + +#define ADD_CASES(...)
int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__) + +#define SET_SUBSTITUTIONS(...) \ + int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__) + +enum MatchRules { + MR_Default, // Skip non-matching lines until a match is found. + MR_Next, // Match must occur on the next line. + MR_Not // No line between the current position and the next match matches + // the regex +}; + +struct TestCase { + TestCase(std::string re, int rule = MR_Default); + + std::string regex_str; + int match_rule; + std::string substituted_regex; + std::shared_ptr<benchmark::Regex> regex; +}; + +enum TestCaseID { + TC_ConsoleOut, + TC_ConsoleErr, + TC_JSONOut, + TC_JSONErr, + TC_CSVOut, + TC_CSVErr, + + TC_NumID // PRIVATE +}; + +// Add a list of test cases to be run against the output specified by +// 'ID' +int AddCases(TestCaseID ID, std::initializer_list<TestCase> il); + +// Add or set a list of substitutions to be performed on constructed regexes. +// See 'output_test_helper.cc' for a list of default substitutions. +int SetSubstitutions( + std::initializer_list<std::pair<std::string, std::string>> il); + +// Run all output tests. +void RunOutputTests(int argc, char* argv[]); + +// ========================================================================= // +// ------------------------- Results checking ------------------------------ // +// ========================================================================= // + +// Call this macro to register a benchmark for checking its results. This +// should be all that's needed. It subscribes a function to check the (CSV) +// results of a benchmark. This is done only after verifying that the output +// strings are really as expected. +// bm_name_pattern: a name or a regex pattern which will be matched against +// all the benchmark names. Matching benchmarks +// will be the subject of a call to checker_function +// checker_function: should be of type ResultsCheckFn (see below) +#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \ + size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) + +struct Results; +typedef std::function< void(Results const&) > ResultsCheckFn; + +size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); + +// Class holding the results of a benchmark. +// It is passed in calls to checker functions. +struct Results { + + // the benchmark name + std::string name; + // the benchmark fields + std::map< std::string, std::string > values; + + Results(const std::string& n) : name(n) {} + + int NumThreads() const; + + typedef enum { kCpuTime, kRealTime } BenchmarkTime; + + // get cpu_time or real_time in seconds + double GetTime(BenchmarkTime which) const; + + // get the real_time duration of the benchmark in seconds. + // it is better to use fuzzy float checks for this, as the float + // ASCII formatting is lossy. + double DurationRealTime() const { + return GetAs< double >("iterations") * GetTime(kRealTime); + } + // get the cpu_time duration of the benchmark in seconds + double DurationCPUTime() const { + return GetAs< double >("iterations") * GetTime(kCpuTime); + } + + // get the string for a result by name, or nullptr if the name + // is not found + const std::string* Get(const char* entry_name) const { + auto it = values.find(entry_name); + if(it == values.end()) return nullptr; + return &it->second; + } + + // get a result by name, parsed as a specific type. + // NOTE: for counters, use GetCounterAs instead.
+ template + T GetAs(const char* entry_name) const; + + // counters are written as doubles, so they have to be read first + // as a double, and only then converted to the asked type. + template + T GetCounterAs(const char* entry_name) const { + double dval = GetAs< double >(entry_name); + T tval = static_cast< T >(dval); + return tval; + } +}; + +template +T Results::GetAs(const char* entry_name) const { + auto *sv = Get(entry_name); + CHECK(sv != nullptr && !sv->empty()); + std::stringstream ss; + ss << *sv; + T out; + ss >> out; + CHECK(!ss.fail()); + return out; +} + +//---------------------------------- +// Macros to help in result checking. Do not use them with arguments causing +// side-effects. + +#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \ + CONCAT(CHECK_, relationship) \ + (entry.getfn< var_type >(var_name), (value)) << "\n" \ + << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ + << __FILE__ << ":" << __LINE__ << ": " \ + << "expected (" << #var_type << ")" << (var_name) \ + << "=" << (entry).getfn< var_type >(var_name) \ + << " to be " #relationship " to " << (value) << "\n" + +// check with tolerance. eps_factor is the tolerance window, which is +// interpreted relative to value (eg, 0.1 means 10% of value). +#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ + CONCAT(CHECK_FLOAT_, relationship) \ + (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \ + << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ + << __FILE__ << ":" << __LINE__ << ": " \ + << "expected (" << #var_type << ")" << (var_name) \ + << "=" << (entry).getfn< var_type >(var_name) \ + << " to be " #relationship " to " << (value) << "\n" \ + << __FILE__ << ":" << __LINE__ << ": " \ + << "with tolerance of " << (eps_factor) * (value) \ + << " (" << (eps_factor)*100. << "%), " \ + << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \ + << " (" << (((entry).getfn< var_type >(var_name) - (value)) \ + / \ + ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) 
\ + << "%)" + +#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \ + _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value) + +#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \ + _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value) + +#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \ + _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor) + +#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ + _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) + +// ========================================================================= // +// --------------------------- Misc Utilities ------------------------------ // +// ========================================================================= // + +namespace { + +const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; + +} // end namespace + +#endif // TEST_OUTPUT_TEST_H Index: MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc +++ MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc @@ -2,10 +2,12 @@ #include #include #include +#include #include "../src/check.h" // NOTE: check.h is for internal use only! #include "../src/re.h" // NOTE: re.h is for internal use only #include "output_test.h" +#include "../src/benchmark_api_internal.h" // ========================================================================= // // ------------------------------ Internals -------------------------------- // @@ -31,21 +33,29 @@ SubMap& GetSubstitutions() { // Don't use 'dec_re' from header because it may not yet be initialized. 
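The header above is the whole contract of the output-test driver: a test registers expected output lines with ADD_CASES, optionally subscribes a results checker, and hands control to RunOutputTests. A condensed, hypothetical usage sketch (BM_example and CheckExample are illustrative, not part of the patch):

#include "benchmark/benchmark.h"
#include "output_test.h"

void BM_example(benchmark::State& state) {
  while (state.KeepRunning()) {
  }
}
BENCHMARK(BM_example);

// %console_report is expanded from the substitution table defined in
// output_test_helper.cc; the default MR_Default rule skips lines until
// the regex matches.
ADD_CASES(TC_ConsoleOut, {{"^BM_example %console_report$"}});

// Only after the textual expectations pass are subscribed checkers run
// against the parsed CSV fields.
void CheckExample(Results const& e) {
  CHECK_RESULT_VALUE(e, int, "iterations", GE, 1);
}
CHECK_BENCHMARK_RESULTS("BM_example", &CheckExample);

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }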
Index: MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc
+++ MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc
@@ -2,10 +2,12 @@
 #include <map>
 #include <iostream>
 #include <sstream>
+#include <cstring>
 
 #include "../src/check.h"  // NOTE: check.h is for internal use only!
 #include "../src/re.h"     // NOTE: re.h is for internal use only
 #include "output_test.h"
+#include "../src/benchmark_api_internal.h"
 
 // ========================================================================= //
 // ------------------------------ Internals -------------------------------- //
@@ -31,21 +33,29 @@
 SubMap& GetSubstitutions() {
   // Don't use 'dec_re' from header because it may not yet be initialized.
-  static std::string dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
+  static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?";
   static SubMap map = {
       {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"},
+      // human-readable float
+      {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"},
       {"%int", "[ ]*[0-9]+"},
       {" %s ", "[ ]+"},
       {"%time", "[ ]*[0-9]{1,5} ns"},
      {"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"},
       {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"},
-      {"%csv_report", "[0-9]+," + dec_re + "," + dec_re + ",ns,,,,,"},
-      {"%csv_us_report", "[0-9]+," + dec_re + "," + dec_re + ",us,,,,,"},
+      {"%csv_header",
+       "name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
+       "items_per_second,label,error_occurred,error_message"},
+      {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
+      {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
       {"%csv_bytes_report",
-       "[0-9]+," + dec_re + "," + dec_re + ",ns," + dec_re + ",,,,"},
+       "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
       {"%csv_items_report",
-       "[0-9]+," + dec_re + "," + dec_re + ",ns,," + dec_re + ",,,"},
-      {"%csv_label_report_begin", "[0-9]+," + dec_re + "," + dec_re + ",ns,,,"},
+       "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"},
+      {"%csv_bytes_items_report",
+       "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re +
+       "," + safe_dec_re + ",,,"},
+      {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"},
       {"%csv_label_report_end", ",,"}};
   return map;
 }
@@ -140,8 +150,179 @@
   std::vector<benchmark::BenchmarkReporter*> reporters_;
 };
 }
+
+}  // end namespace internal
+
+// ========================================================================= //
+// -------------------------- Results checking ----------------------------- //
+// ========================================================================= //
+
+namespace internal {
+
+// Utility class to manage subscribers for checking benchmark results.
+// It works by parsing the CSV output to read the results.
+class ResultsChecker {
+ public:
+
+  struct PatternAndFn : public TestCase {  // reusing TestCase for its regexes
+    PatternAndFn(const std::string& rx, ResultsCheckFn fn_)
+        : TestCase(rx), fn(fn_) {}
+    ResultsCheckFn fn;
+  };
+
+  std::vector< PatternAndFn > check_patterns;
+  std::vector< Results > results;
+  std::vector< std::string > field_names;
+
+  void Add(const std::string& entry_pattern, ResultsCheckFn fn);
+
+  void CheckResults(std::stringstream& output);
+
+ private:
+
+  void SetHeader_(const std::string& csv_header);
+  void SetValues_(const std::string& entry_csv_line);
+
+  std::vector< std::string > SplitCsv_(const std::string& line);
+
+};
+
+// store the static ResultsChecker in a function to prevent initialization
+// order problems
+ResultsChecker& GetResultsChecker() {
+  static ResultsChecker rc;
+  return rc;
+}
+
+// add a results checker for a benchmark
+void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) {
+  check_patterns.emplace_back(entry_pattern, fn);
+}
+
+// check the results of all subscribed benchmarks
+void ResultsChecker::CheckResults(std::stringstream& output) {
+  // first reset the stream to the start
+  {
+    auto start = std::ios::streampos(0);
+    // clear before calling tellg()
+    output.clear();
+    // seek to zero only when needed
+    if (output.tellg() > start) output.seekg(start);
+    // and just in case
+    output.clear();
+  }
+  // now go over every line and publish it to the ResultsChecker
+  std::string line;
+  bool on_first = true;
+  while (output.eof() == false) {
+    CHECK(output.good());
+    std::getline(output, line);
+    if (on_first) {
+      SetHeader_(line);  // this is important
+      on_first = false;
+      continue;
+    }
+    SetValues_(line);
+  }
+  // finally we can call the subscribed check functions
+  for (const auto& p : check_patterns) {
+    VLOG(2) << "--------------------------------\n";
+    VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n";
+    for (const auto& r : results) {
+      if (!p.regex->Match(r.name)) {
+        VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n";
+        continue;
+      } else {
+        VLOG(2) << p.regex_str << " is matched by " << r.name << "\n";
+      }
+      VLOG(1) << "Checking results of " << r.name << ": ... \n";
+      p.fn(r);
+      VLOG(1) << "Checking results of " << r.name << ": OK.\n";
+    }
+  }
+}
+
+// prepare for the names in this header
+void ResultsChecker::SetHeader_(const std::string& csv_header) {
+  field_names = SplitCsv_(csv_header);
+}
+
+// set the values for a benchmark
+void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
+  if (entry_csv_line.empty()) return;  // some lines are empty
+  CHECK(!field_names.empty());
+  auto vals = SplitCsv_(entry_csv_line);
+  CHECK_EQ(vals.size(), field_names.size());
+  results.emplace_back(vals[0]);  // vals[0] is the benchmark name
+  auto& entry = results.back();
+  for (size_t i = 1, e = vals.size(); i < e; ++i) {
+    entry.values[field_names[i]] = vals[i];
+  }
+}
+
+// a quick'n'dirty csv splitter (eliminating quotes)
+std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) {
+  std::vector< std::string > out;
+  if (line.empty()) return out;
+  if (!field_names.empty()) out.reserve(field_names.size());
+  size_t prev = 0, pos = line.find_first_of(','), curr = pos;
+  while (pos != line.npos) {
+    CHECK(curr > 0);
+    if (line[prev] == '"') ++prev;
+    if (line[curr - 1] == '"') --curr;
+    out.push_back(line.substr(prev, curr - prev));
+    prev = pos + 1;
+    pos = line.find_first_of(',', pos + 1);
+    curr = pos;
+  }
+  curr = line.size();
+  if (line[prev] == '"') ++prev;
+  if (line[curr - 1] == '"') --curr;
+  out.push_back(line.substr(prev, curr - prev));
+  return out;
+}
+
+}  // end namespace internal
+
+size_t AddChecker(const char* bm_name, ResultsCheckFn fn)
+{
+  auto& rc = internal::GetResultsChecker();
+  rc.Add(bm_name, fn);
+  return rc.results.size();
+}
+
+int Results::NumThreads() const {
+  auto pos = name.find("/threads:");
+  if (pos == name.npos) return 1;
+  auto end = name.find('/', pos + 9);
+  std::stringstream ss;
+  ss << name.substr(pos + 9, end);
+  int num = 1;
+  ss >> num;
+  CHECK(!ss.fail());
+  return num;
+}
+
+double Results::GetTime(BenchmarkTime which) const {
+  CHECK(which == kCpuTime || which == kRealTime);
+  const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
+  double val = GetAs< double >(which_str);
+  auto unit = Get("time_unit");
+  CHECK(unit);
+  if (*unit == "ns") {
+    return val * 1.e-9;
+  } else if (*unit == "us") {
+    return val * 1.e-6;
+  } else if (*unit == "ms") {
+    return val * 1.e-3;
+  } else if (*unit == "s") {
+    return val;
+  } else {
+    CHECK(1 == 0) << "unknown time unit: " << *unit;
+    return 0;
+  }
+}
+
 // ========================================================================= //
 // -------------------------- Public API Definitions------------------------ //
 // ========================================================================= //
@@ -186,7 +367,8 @@
 void RunOutputTests(int argc, char* argv[]) {
   using internal::GetTestCaseList;
   benchmark::Initialize(&argc, argv);
-  benchmark::ConsoleReporter CR(benchmark::ConsoleReporter::OO_None);
+  auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true);
+  benchmark::ConsoleReporter CR(options);
   benchmark::JSONReporter JR;
   benchmark::CSVReporter CSVR;
   struct ReporterTest {
@@ -231,4 +413,11 @@
     std::cout << "\n";
   }
+
+  // now that we know the output is as expected, we can dispatch
+  // the checks to the subscribed checkers.
+  auto& csv = TestCases[2];
+  // would use == but gcc spits a warning
+  CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
+  internal::GetResultsChecker().CheckResults(csv.out_stream);
 }
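The helper above parses the CSV reporter's stream row by row (header first, then one Results entry per benchmark) and only afterwards invokes each subscribed function on every row whose name matches. A hypothetical checker, to make the flow concrete (names are illustrative, not from the patch):

void CheckMyBench(Results const& e) {
  // Fields arrive as strings and are parsed on demand via GetAs<T>.
  CHECK_RESULT_VALUE(e, int, "iterations", GE, 1);
  // NumThreads() is recovered from the "/threads:N" suffix of the
  // benchmark name, defaulting to 1 when the suffix is absent.
  CHECK_EQ(e.NumThreads(), 1);
}
CHECK_BENCHMARK_RESULTS("BM_mybench$", &CheckMyBench);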
Index: MicroBenchmarks/libs/benchmark-1.2.0/test/register_benchmark_test.cc
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/test/register_benchmark_test.cc
+++ MicroBenchmarks/libs/benchmark-1.2.0/test/register_benchmark_test.cc
@@ -114,23 +114,23 @@
 #endif
 #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
   {
-    int x = 42;
+    const char* x = "42";
     auto capturing_lam = [=](benchmark::State& st) {
       while (st.KeepRunning()) {
       }
-      st.SetLabel(std::to_string(x));
+      st.SetLabel(x);
     };
     benchmark::RegisterBenchmark("lambda_benchmark", capturing_lam);
-    AddCases({{"lambda_benchmark", "42"}});
+    AddCases({{"lambda_benchmark", x}});
   }
 #endif
 }
 
-int main(int argc, char* argv[]) {
+// Test that all benchmarks, registered either during static init or at
+// runtime, are run and their results are passed to the reporter.
+void RunTestOne() {
   TestRegistrationAtRuntime();
 
-  benchmark::Initialize(&argc, argv);
-
   TestReporter test_reporter;
   benchmark::RunSpecifiedBenchmarks(&test_reporter);
 
@@ -143,6 +143,40 @@
     ++EB;
   }
   assert(EB == ExpectedResults.end());
+}
 
-  return 0;
+// Test that ClearRegisteredBenchmarks() clears all previously registered
+// benchmarks.
+// Also test that new benchmarks can be registered and run afterwards.
+void RunTestTwo() {
+  assert(ExpectedResults.size() != 0 &&
+         "must have at least one registered benchmark");
+  ExpectedResults.clear();
+  benchmark::ClearRegisteredBenchmarks();
+
+  TestReporter test_reporter;
+  size_t num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+  assert(num_ran == 0);
+  assert(test_reporter.all_runs_.begin() == test_reporter.all_runs_.end());
+
+  TestRegistrationAtRuntime();
+  num_ran = benchmark::RunSpecifiedBenchmarks(&test_reporter);
+  assert(num_ran == ExpectedResults.size());
+
+  typedef benchmark::BenchmarkReporter::Run Run;
+  auto EB = ExpectedResults.begin();
+
+  for (Run const& run : test_reporter.all_runs_) {
+    assert(EB != ExpectedResults.end());
+    EB->CheckRun(run);
+    ++EB;
+  }
+  assert(EB == ExpectedResults.end());
+}
+
+int main(int argc, char* argv[]) {
+  benchmark::Initialize(&argc, argv);
+
+  RunTestOne();
+  RunTestTwo();
 }
Index: MicroBenchmarks/libs/benchmark-1.2.0/test/reporter_output_test.cc
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/test/reporter_output_test.cc
+++ MicroBenchmarks/libs/benchmark-1.2.0/test/reporter_output_test.cc
@@ -9,11 +9,11 @@
 // ---------------------- Testing Prologue Output -------------------------- //
 // ========================================================================= //
 
-ADD_CASES(TC_ConsoleOut, {{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
-                          {"^[-]+$", MR_Next}});
-ADD_CASES(TC_CSVOut,
-          {{"name,iterations,real_time,cpu_time,time_unit,bytes_per_second,"
-            "items_per_second,label,error_occurred,error_message"}});
+ADD_CASES(TC_ConsoleOut,
+          {{"^[-]+$", MR_Next},
+           {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
+           {"^[-]+$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"%csv_header"}});
 
 // ========================================================================= //
 // ------------------------ Testing Basic Output --------------------------- //
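The register_benchmark changes above exercise the 1.2.0 runtime-registration API end to end. A self-contained sketch of that API (loop bounds and names are illustrative, not from the patch):

#include <string>
#include "benchmark/benchmark.h"

int main(int argc, char* argv[]) {
  benchmark::Initialize(&argc, argv);
  // Benchmarks can be registered after main() starts, e.g. from a lambda.
  for (int len : {1, 2, 4}) {
    std::string name = "BM_dynamic/" + std::to_string(len);
    benchmark::RegisterBenchmark(name.c_str(), [len](benchmark::State& st) {
      while (st.KeepRunning()) {
        benchmark::DoNotOptimize(len);
      }
    });
  }
  benchmark::RunSpecifiedBenchmarks();
  // New in 1.2.0: wipe the registry; a later RunSpecifiedBenchmarks()
  // would run nothing until benchmarks are registered again.
  benchmark::ClearRegisteredBenchmarks();
  return 0;
}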
Index: MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_tabular_test.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_tabular_test.cc
@@ -0,0 +1,250 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// @todo: this checks the full output at once; the rule for
+// CounterSet1 was failing because it was not matching "^[-]+$".
+// @todo: check that the counters are vertically aligned.
+ADD_CASES(TC_ConsoleOut, {
+// keeping these lines long improves readability, so:
+// clang-format off
+    {"^[-]+$", MR_Next},
+    {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
+    {"^[-]+$", MR_Next},
+    {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+    {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+    {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+    {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+    {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
+    {"^[-]+$", MR_Next},
+    {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
+    {"^[-]+$", MR_Next},
+    {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^[-]+$", MR_Next},
+    {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
+    {"^[-]+$", MR_Next},
+    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
+    {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
+// clang-format on
+});
+ADD_CASES(TC_CSVOut, {{"%csv_header,"
+                       "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+void BM_Counters_Tabular(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+    {"Foo",  { 1, bm::Counter::kAvgThreads}},
+    {"Bar",  { 2, bm::Counter::kAvgThreads}},
+    {"Baz",  { 4, bm::Counter::kAvgThreads}},
+    {"Bat",  { 8, bm::Counter::kAvgThreads}},
+    {"Frob", {16, bm::Counter::kAvgThreads}},
+    {"Lob",  {32, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Bat\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float,$", MR_Next},
+                       {"\"Frob\": %float,$", MR_Next},
+                       {"\"Lob\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report,"
+                       "%float,%float,%float,%float,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckTabular(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
+  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
+  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
+  CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
+  CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
+
+// ========================================================================= //
+// -------------------- Tabular+Rate Counters Output ----------------------- //
+// ========================================================================= //
+
+void BM_CounterRates_Tabular(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+    {"Foo",  { 1, bm::Counter::kAvgThreadsRate}},
+    {"Bar",  { 2, bm::Counter::kAvgThreadsRate}},
+    {"Baz",  { 4, bm::Counter::kAvgThreadsRate}},
+    {"Bat",  { 8, bm::Counter::kAvgThreadsRate}},
+    {"Frob", {16, bm::Counter::kAvgThreadsRate}},
+    {"Lob",  {32, bm::Counter::kAvgThreadsRate}},
+  });
+}
+BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Bat\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float,$", MR_Next},
+                       {"\"Frob\": %float,$", MR_Next},
+                       {"\"Lob\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
+                       "%float,%float,%float,%float,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckTabularRate(Results const& e) {
+  double t = e.DurationCPUTime();
+  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1./t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2./t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4./t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8./t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16./t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32./t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
+                        &CheckTabularRate);
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+// set only some of the counters
+void BM_CounterSet0_Tabular(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+    {"Foo", {10, bm::Counter::kAvgThreads}},
+    {"Bar", {20, bm::Counter::kAvgThreads}},
+    {"Baz", {40, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
+                       "%float,,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet0(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
+  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
+
+// again.
+void BM_CounterSet1_Tabular(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+    {"Foo", {15, bm::Counter::kAvgThreads}},
+    {"Bar", {25, bm::Counter::kAvgThreads}},
+    {"Baz", {45, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bar\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
+                       "%float,,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet1(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
+  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
+
+// ========================================================================= //
+// ------------------------- Tabular Counters Output ----------------------- //
+// ========================================================================= //
+
+// set only some of the counters, different set now.
+void BM_CounterSet2_Tabular(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters.insert({
+    {"Foo", {10, bm::Counter::kAvgThreads}},
+    {"Bat", {30, bm::Counter::kAvgThreads}},
+    {"Baz", {40, bm::Counter::kAvgThreads}},
+  });
+}
+BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"Bat\": %float,$", MR_Next},
+                       {"\"Baz\": %float,$", MR_Next},
+                       {"\"Foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
+                       ",%float,%float,%float,,"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSet2(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
+  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
+  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
+}
+CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
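The tabular expectations above hinge on two things: counters inserted in bulk via state.counters.insert, and the console reporter being switched to its tabular layout (presumably via the --benchmark_counters_tabular=true flag supplied by the test harness; the flag itself is not visible in this patch). The per-benchmark side, reduced to a sketch (BM_tabular_sketch is illustrative):

void BM_tabular_sketch(benchmark::State& state) {
  while (state.KeepRunning()) {
  }
  namespace bm = benchmark;
  // Bulk-insert counters; kAvgThreads reports the mean over all threads,
  // so each column stays constant as ThreadRange varies the thread count.
  state.counters.insert({{"Foo", {1, bm::Counter::kAvgThreads}},
                         {"Bar", {2, bm::Counter::kAvgThreads}}});
}
BENCHMARK(BM_tabular_sketch)->ThreadRange(1, 16);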
Index: MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_test.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/test/user_counters_test.cc
@@ -0,0 +1,217 @@
+
+#undef NDEBUG
+
+#include "benchmark/benchmark.h"
+#include "output_test.h"
+
+// ========================================================================= //
+// ---------------------- Testing Prologue Output -------------------------- //
+// ========================================================================= //
+
+ADD_CASES(TC_ConsoleOut,
+          {{"^[-]+$", MR_Next},
+           {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
+           {"^[-]+$", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
+
+// ========================================================================= //
+// ------------------------- Simple Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Simple(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  state.counters["foo"] = 1;
+  state.counters["bar"] = 2 * (double)state.iterations();
+}
+BENCHMARK(BM_Counters_Simple);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckSimple(Results const& e) {
+  double its = e.GetAs< double >("iterations");
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+  // check that the value of bar is within 0.1% of the expected value
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2.*its, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
+
+// ========================================================================= //
+// --------------------- Counters+Items+Bytes/s Output --------------------- //
+// ========================================================================= //
+
+namespace { int num_calls1 = 0; }
+void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  state.counters["foo"] = 1;
+  state.counters["bar"] = ++num_calls1;
+  state.SetBytesProcessed(364);
+  state.SetItemsProcessed(150);
+}
+BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
+ADD_CASES(TC_ConsoleOut,
+          {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
+            "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bytes_per_second\": %int,$", MR_Next},
+                       {"\"items_per_second\": %int,$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
+                       "%csv_bytes_items_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckBytesAndItemsPSec(Results const& e) {
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+  CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364./t, 0.001);
+  CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150./t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
+                        &CheckBytesAndItemsPSec);
+
+// ========================================================================= //
+// ------------------------- Rate Counters Output -------------------------- //
+// ========================================================================= //
+
+void BM_Counters_Rate(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
+}
+BENCHMARK(BM_Counters_Rate);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckRate(Results const& e) {
+  double t = e.DurationCPUTime();  // this (and not real time) is the time used
+  // check that the values are within 0.1% of the expected values
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./t, 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./t, 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
+
+// ========================================================================= //
+// ------------------------- Thread Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_Threads(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  state.counters["foo"] = 1;
+  state.counters["bar"] = 2;
+}
+BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckThreads(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
+  CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
+
+// ========================================================================= //
+// ---------------------- ThreadAvg Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_AvgThreads(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
+}
+BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgThreads(Results const& e) {
+  CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
+  CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
+                        &CheckAvgThreads);
+
+// ========================================================================= //
+// ---------------------- ThreadAvg Counters Output ------------------------ //
+// ========================================================================= //
+
+void BM_Counters_AvgThreadsRate(benchmark::State& state) {
+  while (state.KeepRunning()) {
+  }
+  namespace bm = benchmark;
+  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
+  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
+}
+BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
+ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
+                       {"\"iterations\": %int,$", MR_Next},
+                       {"\"real_time\": %int,$", MR_Next},
+                       {"\"cpu_time\": %int,$", MR_Next},
+                       {"\"time_unit\": \"ns\",$", MR_Next},
+                       {"\"bar\": %float,$", MR_Next},
+                       {"\"foo\": %float$", MR_Next},
+                       {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/threads:%int\",%csv_report,%float,%float$"}});
+// VS2013 does not allow this function to be passed as a lambda argument
+// to CHECK_BENCHMARK_RESULTS()
+void CheckAvgThreadsRate(Results const& e) {
+  CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./e.DurationCPUTime(), 0.001);
+  CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./e.DurationCPUTime(), 0.001);
+}
+CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
+                        &CheckAvgThreadsRate);
+
+// ========================================================================= //
+// --------------------------- TEST CASES END ------------------------------ //
+// ========================================================================= //
+
+int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
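Taken together, the checkers above pin down the counter semantics: a plain counter is summed across threads (CheckThreads expects foo == NumThreads), kAvgThreads averages instead (CheckAvgThreads expects 1 regardless of thread count), and kIsRate divides by the measured CPU time in seconds (CheckRate expects 1/t and 2/t). In sketch form (BM_counter_flavours is illustrative, not from the patch):

void BM_counter_flavours(benchmark::State& state) {
  while (state.KeepRunning()) {
  }
  namespace bm = benchmark;
  state.counters["sum"] = 1;  // summed across threads: reports num_threads
  state.counters["avg"] = bm::Counter{1, bm::Counter::kAvgThreads};  // mean: reports 1
  state.counters["rate"] = bm::Counter{1, bm::Counter::kIsRate};  // reports 1 / cpu_seconds
}
BENCHMARK(BM_counter_flavours)->ThreadRange(1, 8);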
Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/compare_bench.py
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/tools/compare_bench.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""
+compare_bench.py - Compare two benchmarks or their results and report the
+                   difference.
+"""
+import argparse
+from argparse import ArgumentParser
+import sys
+import gbench
+from gbench import util, report
+from gbench.util import *
+
+
+def check_inputs(in1, in2, flags):
+    """
+    Perform checking on the user provided inputs and diagnose any abnormalities
+    """
+    in1_kind, in1_err = classify_input_file(in1)
+    in2_kind, in2_err = classify_input_file(in2)
+    output_file = find_benchmark_flag('--benchmark_out=', flags)
+    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
+    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
+        print(("WARNING: '--benchmark_out=%s' will be passed to both "
+               "benchmarks, causing it to be overwritten") % output_file)
+    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
+        print("WARNING: passing --benchmark flags has no effect since both "
+              "inputs are JSON")
+    if output_type is not None and output_type != 'json':
+        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py'"
+               " is not supported.") % output_type)
+        sys.exit(1)
+
+
+def main():
+    parser = ArgumentParser(
+        description='compare the results of two benchmarks')
+    parser.add_argument(
+        'test1', metavar='test1', type=str, nargs=1,
+        help='A benchmark executable or JSON output file')
+    parser.add_argument(
+        'test2', metavar='test2', type=str, nargs=1,
+        help='A benchmark executable or JSON output file')
+    # FIXME this is a dummy argument which will never actually match
+    # any --benchmark flags but it helps generate a better usage message
+    parser.add_argument(
+        'benchmark_options', metavar='benchmark_option', nargs='*',
+        help='Arguments to pass when running benchmark executables')
+    args, unknown_args = parser.parse_known_args()
+    # Parse the command line flags
+    test1 = args.test1[0]
+    test2 = args.test2[0]
+    if args.benchmark_options:
+        print("Unrecognized positional arguments: '%s'"
+              % args.benchmark_options)
+        exit(1)
+    benchmark_options = unknown_args
+    check_inputs(test1, test2, benchmark_options)
+    # Run the benchmarks and report the results
+    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
+    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
+    output_lines = gbench.report.generate_difference_report(json1, json2)
+    print('Comparing %s to %s' % (test1, test2))
+    for ln in output_lines:
+        print(ln)
+
+
+if __name__ == '__main__':
+    main()
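For orientation, a typical invocation mixes executables and stored results, e.g.

    ./compare_bench.py ./bench_old ./bench_new --benchmark_filter=BM_.*

(the paths and filter here are illustrative). Any extra --benchmark_* flags are forwarded to executable inputs; when both inputs are already JSON they have no effect and check_inputs warns accordingly.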
Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run1.json
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run1.json
+++ MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run1.json
@@ -41,6 +41,20 @@
       "real_time": 100,
       "cpu_time": 100,
       "time_unit": "ns"
+    },
+    {
+      "name": "BM_100xSlower",
+      "iterations": 1000,
+      "real_time": 100,
+      "cpu_time": 100,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_100xFaster",
+      "iterations": 1000,
+      "real_time": 10000,
+      "cpu_time": 10000,
+      "time_unit": "ns"
     }
   ]
 }
\ No newline at end of file
Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run2.json
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run2.json
+++ MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/Inputs/test1_run2.json
@@ -41,6 +41,20 @@
       "real_time": 110,
       "cpu_time": 110,
       "time_unit": "ns"
+    },
+    {
+      "name": "BM_100xSlower",
+      "iterations": 1000,
+      "real_time": 10000,
+      "cpu_time": 10000,
+      "time_unit": "ns"
+    },
+    {
+      "name": "BM_100xFaster",
+      "iterations": 1000,
+      "real_time": 100,
+      "cpu_time": 100,
+      "time_unit": "ns"
     }
   ]
 }
\ No newline at end of file
Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/report.py
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/report.py
+++ MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/report.py
@@ -80,7 +80,9 @@
     first_line = "{:<{}s}     Time   CPU      Old   New".format(
         'Benchmark', first_col_width)
     output_strs = [first_line, '-' * len(first_line)]
-    for bn in json1['benchmarks']:
+
+    gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
+    for bn in gen:
         other_bench = find_test(bn['name'])
         if not other_bench:
             continue
@@ -92,7 +94,7 @@
                 return BC_WHITE
             else:
                 return BC_CYAN
-        fmt_str = "{}{:<{}s}{endc} {}{:+.2f}{endc} {}{:+.2f}{endc} {:4d} {:4d}"
+        fmt_str = "{}{:<{}s}{endc}{}{:+9.2f}{endc}{}{:+14.2f}{endc}{:14d}{:14d}"
         tres = calculate_change(bn['real_time'], other_bench['real_time'])
         cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
         output_strs += [color_format(use_color, fmt_str,
@@ -121,19 +123,22 @@
 
     def test_basic(self):
         expect_lines = [
-            ['BM_SameTimes', '+0.00', '+0.00'],
-            ['BM_2xFaster', '-0.50', '-0.50'],
-            ['BM_2xSlower', '+1.00', '+1.00'],
-            ['BM_10PercentFaster', '-0.10', '-0.10'],
-            ['BM_10PercentSlower', '+0.10', '+0.10']
+            ['BM_SameTimes', '+0.00', '+0.00', '10', '10'],
+            ['BM_2xFaster', '-0.50', '-0.50', '50', '25'],
+            ['BM_2xSlower', '+1.00', '+1.00', '50', '100'],
+            ['BM_10PercentFaster', '-0.10', '-0.10', '100', '90'],
+            ['BM_10PercentSlower', '+0.10', '+0.10', '100', '110'],
+            ['BM_100xSlower', '+99.00', '+99.00', '100', '10000'],
+            ['BM_100xFaster', '-0.99', '-0.99', '10000', '100'],
         ]
         json1, json2 = self.load_results()
-        output_lines = generate_difference_report(json1, json2, use_color=False)
-        print output_lines
+        output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
+        output_lines = output_lines_with_header[2:]
+        print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
         for i in xrange(0, len(output_lines)):
             parts = [x for x in output_lines[i].split(' ') if x]
-            self.assertEqual(len(parts), 3)
+            self.assertEqual(len(parts), 5)
             self.assertEqual(parts, expect_lines[i])
Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py
===================================================================
--- MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py
+++ MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py
@@ -20,21 +20,21 @@
     """
     if not os.path.isfile(filename):
         return False
-    with open(filename, 'r') as f:
+    with open(filename, mode='rb') as f:
        magic_bytes = f.read(_num_magic_bytes)
    if sys.platform == 'darwin':
         return magic_bytes in [
-            '\xfe\xed\xfa\xce',  # MH_MAGIC
-            '\xce\xfa\xed\xfe',  # MH_CIGAM
-            '\xfe\xed\xfa\xcf',  # MH_MAGIC_64
-            '\xcf\xfa\xed\xfe',  # MH_CIGAM_64
-            '\xca\xfe\xba\xbe',  # FAT_MAGIC
-            '\xbe\xba\xfe\xca'   # FAT_CIGAM
+            b'\xfe\xed\xfa\xce',  # MH_MAGIC
+            b'\xce\xfa\xed\xfe',  # MH_CIGAM
+            b'\xfe\xed\xfa\xcf',  # MH_MAGIC_64
+            b'\xcf\xfa\xed\xfe',  # MH_CIGAM_64
+            b'\xca\xfe\xba\xbe',  # FAT_MAGIC
+            b'\xbe\xba\xfe\xca'   # FAT_CIGAM
         ]
     elif sys.platform.startswith('win'):
-        return magic_bytes == 'MZ'
+        return magic_bytes == b'MZ'
     else:
-        return magic_bytes == '\x7FELF'
+        return magic_bytes == b'\x7FELF'
 
 
 def is_json_file(filename):
@@ -68,7 +68,7 @@
     elif is_json_file(filename):
         ftype = IT_JSON
     else:
-        err_msg = "'%s' does not name a valid benchmark executable or JSON file"
+        err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
     return ftype, err_msg
 
 
@@ -80,10 +80,30 @@
     """
     ftype, msg = classify_input_file(filename)
     if ftype == IT_Invalid:
-        print "Invalid input file: %s" % msg
+        print("Invalid input file: %s" % msg)
         sys.exit(1)
     return ftype
 
+
+def find_benchmark_flag(prefix, benchmark_flags):
+    """
+    Search the specified list of flags for a flag matching `<prefix>` and
+    if it is found return the arg it specifies. If specified more than once the
+    last value is returned. If the flag is not found None is returned.
+    """
+    assert prefix.startswith('--') and prefix.endswith('=')
+    result = None
+    for f in benchmark_flags:
+        if f.startswith(prefix):
+            result = f[len(prefix):]
+    return result
+
+
+def remove_benchmark_flags(prefix, benchmark_flags):
+    """
+    Return a new list containing the specified benchmark_flags except those
+    with the specified prefix.
+    """
+    assert prefix.startswith('--') and prefix.endswith('=')
+    return [f for f in benchmark_flags if not f.startswith(prefix)]
+
 
 def load_benchmark_results(fname):
     """
@@ -101,16 +121,25 @@
     real time console output.
     RETURNS: A JSON object representing the benchmark output
     """
-    thandle, tname = tempfile.mkstemp()
-    os.close(thandle)
+    output_name = find_benchmark_flag('--benchmark_out=',
+                                      benchmark_flags)
+    is_temp_output = False
+    if output_name is None:
+        is_temp_output = True
+        thandle, output_name = tempfile.mkstemp()
+        os.close(thandle)
+        benchmark_flags = list(benchmark_flags) + \
+            ['--benchmark_out=%s' % output_name]
+
     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % ' '.join(cmd))
-    exitCode = subprocess.call(cmd + ['--benchmark_out=%s' % tname])
+    exitCode = subprocess.call(cmd)
     if exitCode != 0:
         print('TEST FAILED...')
         sys.exit(exitCode)
-    json_res = load_benchmark_results(tname)
-    os.unlink(tname)
+    json_res = load_benchmark_results(output_name)
+    if is_temp_output:
+        os.unlink(output_name)
     return json_res