Index: MicroBenchmarks/libs/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/CMakeLists.txt +++ MicroBenchmarks/libs/CMakeLists.txt @@ -1,3 +1,3 @@ -add_subdirectory(benchmark-1.2.0) +add_subdirectory(benchmark-1.3.0) test_suite_add_build_dependencies(benchmark) test_suite_add_build_dependencies(output_test_helper) Index: MicroBenchmarks/libs/benchmark-1.2.0/.clang-format =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/.clang-format @@ -1,5 +0,0 @@ ---- -Language: Cpp -BasedOnStyle: Google -... - Index: MicroBenchmarks/libs/benchmark-1.2.0/.gitignore =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/.gitignore @@ -1,46 +0,0 @@ -*.a -*.so -*.so.?* -*.dll -*.exe -*.dylib -*.cmake -!/cmake/*.cmake -*~ -*.pyc -__pycache__ - -# lcov -*.lcov -/lcov - -# cmake files. -/Testing -CMakeCache.txt -CMakeFiles/ -cmake_install.cmake - -# makefiles. -Makefile - -# in-source build. -bin/ -lib/ -/test/*_test - -# exuberant ctags. -tags - -# YouCompleteMe configuration. -.ycm_extra_conf.pyc - -# ninja generated files. -.ninja_deps -.ninja_log -build.ninja -install_manifest.txt -rules.ninja - -# out-of-source build top-level folders. -build/ -_build/ Index: MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/.travis-libcxx-setup.sh @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# Install a newer CMake version -curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh -chmod +x install-cmake.sh -sudo ./install-cmake.sh --prefix=/usr/local --skip-license - -# Checkout LLVM sources -git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source -git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx -git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi - -# Setup libc++ options -if [ -z "$BUILD_32_BITS" ]; then - export BUILD_32_BITS=OFF && echo disabling 32 bit build -fi - -# Build and install libc++ (Use unstable ABI for better sanitizer coverage) -mkdir llvm-build && cd llvm-build -cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \ - -DLIBCXX_ABI_UNSTABLE=ON \ - -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \ - -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \ - ../llvm-source -make cxx -j2 -sudo make install-cxxabi install-cxx -cd ../ Index: MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/.travis.yml @@ -1,157 +0,0 @@ -sudo: required -dist: trusty -language: cpp - -env: - global: - - /usr/local/bin:$PATH - -matrix: - include: - - compiler: gcc - addons: - apt: - packages: - - lcov - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage - - compiler: gcc - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug - - compiler: gcc - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release - - compiler: gcc - addons: - apt: - packages: - - g++-multilib - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON - - compiler: gcc - addons: - apt: - packages: - - g++-multilib - env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON - - compiler: gcc - addons: - apt: - 
sources: - - ubuntu-toolchain-r-test - packages: - - g++-6 - env: - - COMPILER=g++-6 C_COMPILER=gcc-6 BUILD_TYPE=Debug - - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold" - - compiler: clang - env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug - - compiler: clang - env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release - # Clang w/ libc++ - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 - - EXTRA_FLAGS="-stdlib=libc++" - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release - - LIBCXX_BUILD=1 - - EXTRA_FLAGS="-stdlib=libc++" - # Clang w/ 32bit libc++ - - compiler: clang - addons: - apt: - packages: - - clang-3.8 - - g++-multilib - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-stdlib=libc++ -m32" - # Clang w/ 32bit libc++ - - compiler: clang - addons: - apt: - packages: - - clang-3.8 - - g++-multilib - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release - - LIBCXX_BUILD=1 - - BUILD_32_BITS=ON - - EXTRA_FLAGS="-stdlib=libc++ -m32" - # Clang w/ libc++, ASAN, UBSAN - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address" - - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all" - - UBSAN_OPTIONS=print_stacktrace=1 - # Clang w/ libc++ and MSAN - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug - - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins - - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" - # Clang w/ libc++ and MSAN - - compiler: clang - addons: - apt: - packages: - clang-3.8 - env: - - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo - - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread - - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" - - - os: osx - osx_image: xcode8.3 - compiler: clang - env: - - COMPILER=clang++ BUILD_TYPE=Debug - - os: osx - osx_image: xcode8.3 - compiler: clang - env: - - COMPILER=clang++ BUILD_TYPE=Release - -before_script: - - if [ -z "$BUILD_32_BITS" ]; then - export BUILD_32_BITS=OFF && echo disabling 32 bit build; - fi - - if [ -n "${LIBCXX_BUILD}" ]; then - source .travis-libcxx-setup.sh; - fi - - mkdir build && cd build - -install: - - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then - PATH=~/.local/bin:${PATH}; - pip install --user --upgrade pip; - pip install --user cpp-coveralls; - fi - -script: - - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} .. - - make - - ctest -C ${BUILD_TYPE} --output-on-failure - -after_success: - - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then - coveralls --include src --include include --gcov-options '\-lp' --root .. 
--build-root .; - fi Index: MicroBenchmarks/libs/benchmark-1.2.0/.ycm_extra_conf.py =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/.ycm_extra_conf.py @@ -1,115 +0,0 @@ -import os -import ycm_core - -# These are the compilation flags that will be used in case there's no -# compilation database set (by default, one is not set). -# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR. -flags = [ -'-Wall', -'-Werror', -'-pendantic-errors', -'-std=c++0x', -'-fno-strict-aliasing', -'-O3', -'-DNDEBUG', -# ...and the same thing goes for the magic -x option which specifies the -# language that the files to be compiled are written in. This is mostly -# relevant for c++ headers. -# For a C project, you would set this to 'c' instead of 'c++'. -'-x', 'c++', -'-I', 'include', -'-isystem', '/usr/include', -'-isystem', '/usr/local/include', -] - - -# Set this to the absolute path to the folder (NOT the file!) containing the -# compile_commands.json file to use that instead of 'flags'. See here for -# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html -# -# Most projects will NOT need to set this to anything; you can just change the -# 'flags' list of compilation flags. Notice that YCM itself uses that approach. -compilation_database_folder = '' - -if os.path.exists( compilation_database_folder ): - database = ycm_core.CompilationDatabase( compilation_database_folder ) -else: - database = None - -SOURCE_EXTENSIONS = [ '.cc' ] - -def DirectoryOfThisScript(): - return os.path.dirname( os.path.abspath( __file__ ) ) - - -def MakeRelativePathsInFlagsAbsolute( flags, working_directory ): - if not working_directory: - return list( flags ) - new_flags = [] - make_next_absolute = False - path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ] - for flag in flags: - new_flag = flag - - if make_next_absolute: - make_next_absolute = False - if not flag.startswith( '/' ): - new_flag = os.path.join( working_directory, flag ) - - for path_flag in path_flags: - if flag == path_flag: - make_next_absolute = True - break - - if flag.startswith( path_flag ): - path = flag[ len( path_flag ): ] - new_flag = path_flag + os.path.join( working_directory, path ) - break - - if new_flag: - new_flags.append( new_flag ) - return new_flags - - -def IsHeaderFile( filename ): - extension = os.path.splitext( filename )[ 1 ] - return extension in [ '.h', '.hxx', '.hpp', '.hh' ] - - -def GetCompilationInfoForFile( filename ): - # The compilation_commands.json file generated by CMake does not have entries - # for header files. So we do our best by asking the db for flags for a - # corresponding source file, if any. If one exists, the flags for that file - # should be good enough. 
- if IsHeaderFile( filename ): - basename = os.path.splitext( filename )[ 0 ] - for extension in SOURCE_EXTENSIONS: - replacement_file = basename + extension - if os.path.exists( replacement_file ): - compilation_info = database.GetCompilationInfoForFile( - replacement_file ) - if compilation_info.compiler_flags_: - return compilation_info - return None - return database.GetCompilationInfoForFile( filename ) - - -def FlagsForFile( filename, **kwargs ): - if database: - # Bear in mind that compilation_info.compiler_flags_ does NOT return a - # python list, but a "list-like" StringVec object - compilation_info = GetCompilationInfoForFile( filename ) - if not compilation_info: - return None - - final_flags = MakeRelativePathsInFlagsAbsolute( - compilation_info.compiler_flags_, - compilation_info.compiler_working_dir_ ) - else: - relative_to = DirectoryOfThisScript() - final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to ) - - return { - 'flags': final_flags, - 'do_cache': True - } Index: MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTING.md =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/CONTRIBUTING.md @@ -1,58 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - -Once your CLA is submitted (or if you already submitted one for -another Google project), make a commit adding yourself to the -[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part -of your first [pull request][]. - -[AUTHORS]: AUTHORS -[CONTRIBUTORS]: CONTRIBUTORS - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. 
- -[forking]: https://help.github.com/articles/fork-a-repo -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[pull request]: https://help.github.com/articles/creating-a-pull-request Index: MicroBenchmarks/libs/benchmark-1.2.0/LICENSE =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/LICENSE @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. Index: MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/appveyor.yml @@ -1,56 +0,0 @@ -version: '{build}' - -image: Visual Studio 2017 - -configuration: - - Debug - - Release - -environment: - matrix: - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017" - - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017 Win64" - - - compiler: msvc-14-seh - generator: "Visual Studio 14 2015" - - - compiler: msvc-14-seh - generator: "Visual Studio 14 2015 Win64" - - - compiler: msvc-12-seh - generator: "Visual Studio 12 2013" - - - compiler: msvc-12-seh - generator: "Visual Studio 12 2013 Win64" - - - compiler: gcc-5.3.0-posix - generator: "MinGW Makefiles" - cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin' - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - -matrix: - fast_finish: true - -install: - # git bash conflicts with MinGW makefiles - - if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%") - - if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%") - -build_script: - - md _build -Force - - cd _build - - echo %configuration% - - cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" .. - - cmake --build . 
--config %configuration%
-
-test_script:
-  - ctest -c %configuration% --timeout 300 --output-on-failure
-
-artifacts:
-  - path: '_build/CMakeFiles/*.log'
    name: logs
-  - path: '_build/Testing/**/*.xml'
    name: test_results
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/Config.cmake.in
@@ -1 +0,0 @@
-include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/GetGitVersion.cmake
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/GetGitVersion.cmake
@@ -1,51 +0,0 @@
-# - Returns a version string from Git tags
-#
-# This function inspects the annotated git tags for the project and returns a string
-# into a CMake variable
-#
-#  get_git_version()
-#
-# - Example
-#
-# include(GetGitVersion)
-# get_git_version(GIT_VERSION)
-#
-# Requires CMake 2.8.11+
-find_package(Git)
-
-if(__get_git_version)
-  return()
-endif()
-set(__get_git_version INCLUDED)
-
-function(get_git_version var)
-  if(GIT_EXECUTABLE)
-    execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
-        RESULT_VARIABLE status
-        OUTPUT_VARIABLE GIT_VERSION
-        ERROR_QUIET)
-    if(${status})
-      set(GIT_VERSION "v0.0.0")
-    else()
-      string(STRIP ${GIT_VERSION} GIT_VERSION)
-      string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION})
-    endif()
-
-    # Work out if the repository is dirty
-    execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
-        OUTPUT_QUIET
-        ERROR_QUIET)
-    execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
-        OUTPUT_VARIABLE GIT_DIFF_INDEX
-        ERROR_QUIET)
-    string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
-    if (${GIT_DIRTY})
-      set(GIT_VERSION "${GIT_VERSION}-dirty")
-    endif()
-  else()
-    set(GIT_VERSION "v0.0.0")
-  endif()
-
-  message("-- git Version: ${GIT_VERSION}")
-  set(${var} ${GIT_VERSION} PARENT_SCOPE)
-endfunction()
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/gnu_posix_regex.cpp
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/gnu_posix_regex.cpp
@@ -1,12 +0,0 @@
-#include <gnu/libc-version.h>
-#include <regex.h>
-#include <string>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-}
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/posix_regex.cpp
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/posix_regex.cpp
@@ -1,14 +0,0 @@
-#include <regex.h>
-#include <string>
-int main() {
-  std::string str = "test0159";
-  regex_t re;
-  int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
-  if (ec != 0) {
-    return ec;
-  }
-  int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
-  regfree(&re);
-  return ret;
-}
-
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/std_regex.cpp
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/std_regex.cpp
@@ -1,10 +0,0 @@
-#include <regex>
-#include <string>
-int main() {
-  const std::string str = "test0159";
-  std::regex re;
-  re = std::regex("^[a-z]+[0-9]+$",
-                  std::regex_constants::extended | std::regex_constants::nosubs);
-  return std::regex_search(str, re) ? 0 : -1;
-}
-
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/steady_clock.cpp
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/steady_clock.cpp
@@ -1,7 +0,0 @@
-#include <chrono>
-
-int main() {
-  typedef std::chrono::steady_clock Clock;
-  Clock::time_point tp = Clock::now();
-  ((void)tp);
-}
Index: MicroBenchmarks/libs/benchmark-1.2.0/cmake/thread_safety_attributes.cpp
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/cmake/thread_safety_attributes.cpp
@@ -1,4 +0,0 @@
-#define HAVE_THREAD_SAFETY_ATTRIBUTES
-#include "../src/mutex.h"
-
-int main() {}
Index: MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/docs/tools.md
@@ -1,59 +0,0 @@
-# Benchmark Tools
-
-## compare_bench.py
-
-The `compare_bench.py` utility can be used to compare the results of two benchmark runs.
-The program is invoked like:
-
-``` bash
-$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]...
-```
-
-Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file.
-
-The sample output using the JSON test files under `Inputs/` gives:
-
-``` bash
-$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json
-Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json
-Benchmark                   Time    CPU
-----------------------------------------------
-BM_SameTimes               +0.00  +0.00
-BM_2xFaster                -0.50  -0.50
-BM_2xSlower                +1.00  +1.00
-BM_10PercentFaster         -0.10  -0.10
-BM_10PercentSlower         +0.10  +0.10
-```
-
-When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like:
-
-```
-./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.*
-RUNNING: test/basic_test --benchmark_filter=BM_empty.*
-Run on (4 X 4228.32 MHz CPU s)
-2016-08-02 19:21:33
-Benchmark                              Time           CPU Iterations
---------------------------------------------------------------------
-BM_empty                               9 ns          9 ns   79545455
-BM_empty/threads:4                     4 ns          9 ns   75268816
-BM_empty_stop_start                    8 ns          8 ns   83333333
-BM_empty_stop_start/threads:4          3 ns          8 ns   83333332
-RUNNING: test/basic_test --benchmark_filter=BM_empty.*
-Run on (4 X 4228.32 MHz CPU s)
-2016-08-02 19:21:35
-Benchmark                              Time           CPU Iterations
---------------------------------------------------------------------
-BM_empty                               9 ns          9 ns   76086957
-BM_empty/threads:4                     4 ns          9 ns   76086956
-BM_empty_stop_start                    8 ns          8 ns   87500000
-BM_empty_stop_start/threads:4          3 ns          8 ns   88607596
-Comparing test/basic_test to test/basic_test
-Benchmark                        Time    CPU
----------------------------------------------------------
-BM_empty                        +0.00  +0.00
-BM_empty/threads:4              +0.00  +0.00
-BM_empty_stop_start             +0.00  +0.00
-BM_empty_stop_start/threads:4   +0.00  +0.00
-```
-
-Obviously this example doesn't give any useful output, but it's intended to show the output format when `compare_bench.py` needs to run benchmarks.
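A note on reading these tables: the `Time` and `CPU` columns are relative differences computed as `(new - old) / old`, so `-0.50` means the new run takes half the time and `+1.00` means it takes twice as long. A minimal sketch of that calculation (illustrative names, not taken from `compare_bench.py` itself):

```cpp
#include <cstdio>

// Relative change as shown in the Time/CPU columns of compare_bench.py:
// (new - old) / old. Negative values mean the new run is faster.
static double RelativeChange(double old_time, double new_time) {
  return (new_time - old_time) / old_time;
}

int main() {
  std::printf("BM_2xFaster  %+.2f\n", RelativeChange(100.0, 50.0));   // -0.50
  std::printf("BM_2xSlower  %+.2f\n", RelativeChange(100.0, 200.0));  // +1.00
  return 0;
}
```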
Index: MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/benchmark_api.h @@ -1,27 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#ifndef BENCHMARK_BENCHMARK_API_H_ -#define BENCHMARK_BENCHMARK_API_H_ - -#ifdef __DEPRECATED -# ifndef BENCHMARK_WARNING_MSG -# warning the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead -# else - BENCHMARK_WARNING_MSG("the benchmark_api.h header has been deprecated and will be removed, please include benchmark.h instead") -# endif -#endif - -#include "benchmark.h" // For forward declaration of BenchmarkReporter - -#endif // BENCHMARK_BENCHMARK_API_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/include/benchmark/reporter.h @@ -1,27 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#ifndef BENCHMARK_REPORTER_H_ -#define BENCHMARK_REPORTER_H_ - -#ifdef __DEPRECATED -# ifndef BENCHMARK_WARNING_MSG -# warning the reporter.h header has been deprecated and will be removed, please include benchmark.h instead -# else - BENCHMARK_WARNING_MSG("the reporter.h header has been deprecated and will be removed, please include benchmark.h instead") -# endif -#endif - -#include "benchmark.h" // For forward declaration of BenchmarkReporter - -#endif // BENCHMARK_REPORTER_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/mingw.py =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/mingw.py @@ -1,320 +0,0 @@ -#! 
/usr/bin/env python -# encoding: utf-8 - -import argparse -import errno -import logging -import os -import platform -import re -import sys -import subprocess -import tempfile - -try: - import winreg -except ImportError: - import _winreg as winreg -try: - import urllib.request as request -except ImportError: - import urllib as request -try: - import urllib.parse as parse -except ImportError: - import urlparse as parse - -class EmptyLogger(object): - ''' - Provides an implementation that performs no logging - ''' - def debug(self, *k, **kw): - pass - def info(self, *k, **kw): - pass - def warn(self, *k, **kw): - pass - def error(self, *k, **kw): - pass - def critical(self, *k, **kw): - pass - def setLevel(self, *k, **kw): - pass - -urls = ( - 'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20' - 'targetting%20Win32/Personal%20Builds/mingw-builds/installer/' - 'repository.txt', - 'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/' - 'repository.txt' -) -''' -A list of mingw-build repositories -''' - -def repository(urls = urls, log = EmptyLogger()): - ''' - Downloads and parse mingw-build repository files and parses them - ''' - log.info('getting mingw-builds repository') - versions = {} - re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files') - re_sub = r'http://downloads.sourceforge.net/project/\1' - for url in urls: - log.debug(' - requesting: %s', url) - socket = request.urlopen(url) - repo = socket.read() - if not isinstance(repo, str): - repo = repo.decode(); - socket.close() - for entry in repo.split('\n')[:-1]: - value = entry.split('|') - version = tuple([int(n) for n in value[0].strip().split('.')]) - version = versions.setdefault(version, {}) - arch = value[1].strip() - if arch == 'x32': - arch = 'i686' - elif arch == 'x64': - arch = 'x86_64' - arch = version.setdefault(arch, {}) - threading = arch.setdefault(value[2].strip(), {}) - exceptions = threading.setdefault(value[3].strip(), {}) - revision = exceptions.setdefault(int(value[4].strip()[3:]), - re_sourceforge.sub(re_sub, value[5].strip())) - return versions - -def find_in_path(file, path=None): - ''' - Attempts to find an executable in the path - ''' - if platform.system() == 'Windows': - file += '.exe' - if path is None: - path = os.environ.get('PATH', '') - if type(path) is type(''): - path = path.split(os.pathsep) - return list(filter(os.path.exists, - map(lambda dir, file=file: os.path.join(dir, file), path))) - -def find_7zip(log = EmptyLogger()): - ''' - Attempts to find 7zip for unpacking the mingw-build archives - ''' - log.info('finding 7zip') - path = find_in_path('7z') - if not path: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip') - path, _ = winreg.QueryValueEx(key, 'Path') - path = [os.path.join(path, '7z.exe')] - log.debug('found \'%s\'', path[0]) - return path[0] - -find_7zip() - -def unpack(archive, location, log = EmptyLogger()): - ''' - Unpacks a mingw-builds archive - ''' - sevenzip = find_7zip(log) - log.info('unpacking %s', os.path.basename(archive)) - cmd = [sevenzip, 'x', archive, '-o' + location, '-y'] - log.debug(' - %r', cmd) - with open(os.devnull, 'w') as devnull: - subprocess.check_call(cmd, stdout = devnull) - -def download(url, location, log = EmptyLogger()): - ''' - Downloads and unpacks a mingw-builds archive - ''' - log.info('downloading MinGW') - log.debug(' - url: %s', url) - log.debug(' - location: %s', location) - - re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*') - - stream = 
request.urlopen(url) - try: - content = stream.getheader('Content-Disposition') or '' - except AttributeError: - content = stream.headers.getheader('Content-Disposition') or '' - matches = re_content.match(content) - if matches: - filename = matches.group(2) - else: - parsed = parse.urlparse(stream.geturl()) - filename = os.path.basename(parsed.path) - - try: - os.makedirs(location) - except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir(location): - pass - else: - raise - - archive = os.path.join(location, filename) - with open(archive, 'wb') as out: - while True: - buf = stream.read(1024) - if not buf: - break - out.write(buf) - unpack(archive, location, log = log) - os.remove(archive) - - possible = os.path.join(location, 'mingw64') - if not os.path.exists(possible): - possible = os.path.join(location, 'mingw32') - if not os.path.exists(possible): - raise ValueError('Failed to find unpacked MinGW: ' + possible) - return possible - -def root(location = None, arch = None, version = None, threading = None, - exceptions = None, revision = None, log = EmptyLogger()): - ''' - Returns the root folder of a specific version of the mingw-builds variant - of gcc. Will download the compiler if needed - ''' - - # Get the repository if we don't have all the information - if not (arch and version and threading and exceptions and revision): - versions = repository(log = log) - - # Determine some defaults - version = version or max(versions.keys()) - if not arch: - arch = platform.machine().lower() - if arch == 'x86': - arch = 'i686' - elif arch == 'amd64': - arch = 'x86_64' - if not threading: - keys = versions[version][arch].keys() - if 'posix' in keys: - threading = 'posix' - elif 'win32' in keys: - threading = 'win32' - else: - threading = keys[0] - if not exceptions: - keys = versions[version][arch][threading].keys() - if 'seh' in keys: - exceptions = 'seh' - elif 'sjlj' in keys: - exceptions = 'sjlj' - else: - exceptions = keys[0] - if revision == None: - revision = max(versions[version][arch][threading][exceptions].keys()) - if not location: - location = os.path.join(tempfile.gettempdir(), 'mingw-builds') - - # Get the download url - url = versions[version][arch][threading][exceptions][revision] - - # Tell the user whatzzup - log.info('finding MinGW %s', '.'.join(str(v) for v in version)) - log.debug(' - arch: %s', arch) - log.debug(' - threading: %s', threading) - log.debug(' - exceptions: %s', exceptions) - log.debug(' - revision: %s', revision) - log.debug(' - url: %s', url) - - # Store each specific revision differently - slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}' - slug = slug.format( - version = '.'.join(str(v) for v in version), - arch = arch, - threading = threading, - exceptions = exceptions, - revision = revision - ) - if arch == 'x86_64': - root_dir = os.path.join(location, slug, 'mingw64') - elif arch == 'i686': - root_dir = os.path.join(location, slug, 'mingw32') - else: - raise ValueError('Unknown MinGW arch: ' + arch) - - # Download if needed - if not os.path.exists(root_dir): - downloaded = download(url, os.path.join(location, slug), log = log) - if downloaded != root_dir: - raise ValueError('The location of mingw did not match\n%s\n%s' - % (downloaded, root_dir)) - - return root_dir - -def str2ver(string): - ''' - Converts a version string into a tuple - ''' - try: - version = tuple(int(v) for v in string.split('.')) - if len(version) is not 3: - raise ValueError() - except ValueError: - raise argparse.ArgumentTypeError( - 'please provide a 
three digit version string')
-    return version
-
-def main():
-    '''
-    Invoked when the script is run directly by the python interpreter
-    '''
-    parser = argparse.ArgumentParser(
-        description = 'Downloads a specific version of MinGW',
-        formatter_class = argparse.ArgumentDefaultsHelpFormatter
-    )
-    parser.add_argument('--location',
-        help = 'the location to download the compiler to',
-        default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
-    parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
-        help = 'the target MinGW architecture string')
-    parser.add_argument('--version', type = str2ver,
-        help = 'the version of GCC to download')
-    parser.add_argument('--threading', choices = ['posix', 'win32'],
-        help = 'the threading type of the compiler')
-    parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
-        help = 'the method to throw exceptions')
-    parser.add_argument('--revision', type=int,
-        help = 'the revision of the MinGW release')
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument('-v', '--verbose', action='store_true',
-        help='increase the script output verbosity')
-    group.add_argument('-q', '--quiet', action='store_true',
-        help='only print errors and warning')
-    args = parser.parse_args()
-
-    # Create the logger
-    logger = logging.getLogger('mingw')
-    handler = logging.StreamHandler()
-    formatter = logging.Formatter('%(message)s')
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.setLevel(logging.INFO)
-    if args.quiet:
-        logger.setLevel(logging.WARN)
-    if args.verbose:
-        logger.setLevel(logging.DEBUG)
-
-    # Get MinGW
-    root_dir = root(location = args.location, arch = args.arch,
-        version = args.version, threading = args.threading,
-        exceptions = args.exceptions, revision = args.revision,
-        log = logger)
-
-    sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
-
-if __name__ == '__main__':
-    try:
-        main()
-    except IOError as e:
-        sys.stderr.write('IO error: %s\n' % e)
-        sys.exit(1)
-    except OSError as e:
-        sys.stderr.write('OS error: %s\n' % e)
-        sys.exit(1)
-    except KeyboardInterrupt as e:
-        sys.stderr.write('Killed\n')
-        sys.exit(1)
Index: MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/src/arraysize.h
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_ARRAYSIZE_H_
-#define BENCHMARK_ARRAYSIZE_H_
-
-#include "internal_macros.h"
-
-namespace benchmark {
-namespace internal {
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example. If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
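The declaration above is the whole trick: `ArraySizeHelper` is only ever named inside `sizeof`, an unevaluated context, so it needs no definition. It maps a reference to `T[N]` to a reference to `char[N]`, and `sizeof` of that return type is `N` at compile time. A standalone sketch of the same technique, with hypothetical names independent of the benchmark sources:

```cpp
#include <cstddef>

// Declared but never defined: sizeof() only inspects the return type,
// a reference to char[N], so the result is N at compile time.
template <typename T, std::size_t N>
char (&CountOf(T (&array)[N]))[N];

int main() {
  int values[7];
  static_assert(sizeof(CountOf(values)) == 7, "compile-time element count");
  // Unlike sizeof(values) / sizeof(values[0]), this fails to compile when
  // handed a pointer instead of an array.
  return 0;
}
```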
-
-// That gcc wants both of these prototypes seems mysterious. VC, for
-// its part, can't decide which to use (another mystery). Matching of
-// template overloads: the final frontier.
-#ifndef COMPILER_MSVC
-template <typename T, size_t N>
-char (&ArraySizeHelper(const T (&array)[N]))[N];
-#endif
-
-#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
-
-}  // end namespace internal
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_ARRAYSIZE_H_
Index: MicroBenchmarks/libs/benchmark-1.2.0/src/check.h
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/src/check.h
@@ -1,79 +0,0 @@
-#ifndef CHECK_H_
-#define CHECK_H_
-
-#include <cmath>
-#include <cstdlib>
-#include <ostream>
-
-#include "internal_macros.h"
-#include "log.h"
-
-namespace benchmark {
-namespace internal {
-
-typedef void(AbortHandlerT)();
-
-inline AbortHandlerT*& GetAbortHandler() {
-  static AbortHandlerT* handler = &std::abort;
-  return handler;
-}
-
-BENCHMARK_NORETURN inline void CallAbortHandler() {
-  GetAbortHandler()();
-  std::abort();  // fallback to enforce noreturn
-}
-
-// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
-// will log information about the failures and abort when it is destructed.
-class CheckHandler {
- public:
-  CheckHandler(const char* check, const char* file, const char* func, int line)
-      : log_(GetErrorLogInstance()) {
-    log_ << file << ":" << line << ": " << func << ": Check `" << check
-         << "' failed. ";
-  }
-
-  LogType& GetLog() { return log_; }
-
-  BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) {
-    log_ << std::endl;
-    CallAbortHandler();
-  }
-
-  CheckHandler& operator=(const CheckHandler&) = delete;
-  CheckHandler(const CheckHandler&) = delete;
-  CheckHandler() = delete;
-
- private:
-  LogType& log_;
-};
-
-}  // end namespace internal
-}  // end namespace benchmark
-
-// The CHECK macro returns a std::ostream object that can have extra information
-// written to it.
-#ifndef NDEBUG
-#define CHECK(b)                                                             \
-  (b ? ::benchmark::internal::GetNullLogInstance()                          \
-     : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
-           .GetLog())
-#else
-#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
-#endif
-
-#define CHECK_EQ(a, b) CHECK((a) == (b))
-#define CHECK_NE(a, b) CHECK((a) != (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
-
-#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
-#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
-#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
-#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
-#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
-#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
-
-#endif  // CHECK_H_
Index: MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.h
@@ -1,33 +0,0 @@
-#ifndef BENCHMARK_COLORPRINT_H_
-#define BENCHMARK_COLORPRINT_H_
-
-#include <cstdarg>
-#include <iostream>
-#include <string>
-
-namespace benchmark {
-enum LogColor {
-  COLOR_DEFAULT,
-  COLOR_RED,
-  COLOR_GREEN,
-  COLOR_YELLOW,
-  COLOR_BLUE,
-  COLOR_MAGENTA,
-  COLOR_CYAN,
-  COLOR_WHITE
-};
-
-std::string FormatString(const char* msg, va_list args);
-std::string FormatString(const char* msg, ...);
-
-void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
-                 va_list args);
-void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...);
-
-// Returns true if stdout appears to be a terminal that supports colored
-// output, false otherwise.
-bool IsColorTerminal();
-
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_COLORPRINT_H_
Index: MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/src/colorprint.cc
@@ -1,188 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "colorprint.h"
-
-#include <cstdarg>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <memory>
-#include <string>
-
-#include "check.h"
-#include "internal_macros.h"
-
-#ifdef BENCHMARK_OS_WINDOWS
-#include <windows.h>
-#include <io.h>
-#else
-#include <unistd.h>
-#endif  // BENCHMARK_OS_WINDOWS
-
-namespace benchmark {
-namespace {
-#ifdef BENCHMARK_OS_WINDOWS
-typedef WORD PlatformColorCode;
-#else
-typedef const char* PlatformColorCode;
-#endif
-
-PlatformColorCode GetPlatformColorCode(LogColor color) {
-#ifdef BENCHMARK_OS_WINDOWS
-  switch (color) {
-    case COLOR_RED:
-      return FOREGROUND_RED;
-    case COLOR_GREEN:
-      return FOREGROUND_GREEN;
-    case COLOR_YELLOW:
-      return FOREGROUND_RED | FOREGROUND_GREEN;
-    case COLOR_BLUE:
-      return FOREGROUND_BLUE;
-    case COLOR_MAGENTA:
-      return FOREGROUND_BLUE | FOREGROUND_RED;
-    case COLOR_CYAN:
-      return FOREGROUND_BLUE | FOREGROUND_GREEN;
-    case COLOR_WHITE:  // fall through to default
-    default:
-      return 0;
-  }
-#else
-  switch (color) {
-    case COLOR_RED:
-      return "1";
-    case COLOR_GREEN:
-      return "2";
-    case COLOR_YELLOW:
-      return "3";
-    case COLOR_BLUE:
-      return "4";
-    case COLOR_MAGENTA:
-      return "5";
-    case COLOR_CYAN:
-      return "6";
-    case COLOR_WHITE:
-      return "7";
-    default:
-      return nullptr;
-  };
-#endif
-}
-
-}  // end namespace
-
-std::string FormatString(const char* msg, va_list args) {
-  // we might need a second shot at this, so pre-emptively make a copy
-  va_list args_cp;
-  va_copy(args_cp, args);
-
-  std::size_t size = 256;
-  char local_buff[256];
-  auto ret = vsnprintf(local_buff, size, msg, args_cp);
-
-  va_end(args_cp);
-
-  // currently there is no error handling for failure, so this is a hack.
-  CHECK(ret >= 0);
-
-  if (ret == 0)  // handle empty expansion
-    return {};
-  else if (static_cast<size_t>(ret) < size)
-    return local_buff;
-  else {
-    // we did not provide a long enough buffer on our first attempt.
-    size = (size_t)ret + 1;  // + 1 for the null byte
-    std::unique_ptr<char[]> buff(new char[size]);
-    ret = vsnprintf(buff.get(), size, msg, args);
-    CHECK(ret > 0 && ((size_t)ret) < size);
-    return buff.get();
-  }
-}
-
-std::string FormatString(const char* msg, ...) {
-  va_list args;
-  va_start(args, msg);
-  auto tmp = FormatString(msg, args);
-  va_end(args);
-  return tmp;
-}
-
-void ColorPrintf(std::ostream& out, LogColor color, const char* fmt, ...) {
-  va_list args;
-  va_start(args, fmt);
-  ColorPrintf(out, color, fmt, args);
-  va_end(args);
-}
-
-void ColorPrintf(std::ostream& out, LogColor color, const char* fmt,
-                 va_list args) {
-#ifdef BENCHMARK_OS_WINDOWS
-  ((void)out);  // suppress unused warning
-
-  const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
-
-  // Gets the current text color.
-  CONSOLE_SCREEN_BUFFER_INFO buffer_info;
-  GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
-  const WORD old_color_attrs = buffer_info.wAttributes;
-
-  // We need to flush the stream buffers into the console before each
-  // SetConsoleTextAttribute call lest it affect the text that is already
-  // printed but has not yet reached the console.
-  fflush(stdout);
-  SetConsoleTextAttribute(stdout_handle,
-                          GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
-  vprintf(fmt, args);
-
-  fflush(stdout);
-  // Restores the text color.
-  SetConsoleTextAttribute(stdout_handle, old_color_attrs);
-#else
-  const char* color_code = GetPlatformColorCode(color);
-  if (color_code) out << FormatString("\033[0;3%sm", color_code);
-  out << FormatString(fmt, args) << "\033[m";
-#endif
-}
-
-bool IsColorTerminal() {
-#if BENCHMARK_OS_WINDOWS
-  // On Windows the TERM variable is usually not set, but the
-  // console there does support colors.
-  return 0 != _isatty(_fileno(stdout));
-#else
-  // On non-Windows platforms, we rely on the TERM variable. This list of
-  // supported TERM values is copied from Google Test:
-  // .
-  const char* const SUPPORTED_TERM_VALUES[] = {
-      "xterm",         "xterm-color",     "xterm-256color",
-      "screen",        "screen-256color", "tmux",
-      "tmux-256color", "rxvt-unicode",    "rxvt-unicode-256color",
-      "linux",         "cygwin",
-  };
-
-  const char* const term = getenv("TERM");
-
-  bool term_supports_color = false;
-  for (const char* candidate : SUPPORTED_TERM_VALUES) {
-    if (term && 0 == strcmp(term, candidate)) {
-      term_supports_color = true;
-      break;
-    }
-  }
-
-  return 0 != isatty(fileno(stdout)) && term_supports_color;
-#endif  // BENCHMARK_OS_WINDOWS
-}
-
-}  // end namespace benchmark
Index: MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.h
@@ -1,79 +0,0 @@
-#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
-#define BENCHMARK_COMMANDLINEFLAGS_H_
-
-#include <cstdint>
-#include <string>
-
-// Macro for referencing flags.
-#define FLAG(name) FLAGS_##name
-
-// Macros for declaring flags.
-#define DECLARE_bool(name) extern bool FLAG(name)
-#define DECLARE_int32(name) extern int32_t FLAG(name)
-#define DECLARE_int64(name) extern int64_t FLAG(name)
-#define DECLARE_double(name) extern double FLAG(name)
-#define DECLARE_string(name) extern std::string FLAG(name)
-
-// Macros for defining flags.
-#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
-#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
-#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
-#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
-#define DEFINE_string(name, default_val, doc) \
-  std::string FLAG(name) = (default_val)
-
-namespace benchmark {
-// Parses 'str' for a 32-bit signed integer. If successful, writes the result
-// to *value and returns true; otherwise leaves *value unchanged and returns
-// false.
-bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
-
-// Parses a bool/Int32/string from the environment variable
-// corresponding to the given Google Test flag.
-bool BoolFromEnv(const char* flag, bool default_val);
-int32_t Int32FromEnv(const char* flag, int32_t default_val);
-double DoubleFromEnv(const char* flag, double default_val);
-const char* StringFromEnv(const char* flag, const char* default_val);
-
-// Parses a string for a bool flag, in the form of either
-// "--flag=value" or "--flag".
-//
-// In the former case, the value is taken as true if it passes IsTruthyValue().
-//
-// In the latter case, the value is taken as true.
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value);
-
-// Parses a string for an Int32 flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);
-
-// Parses a string for a Double flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseDoubleFlag(const char* str, const char* flag, double* value);
-
-// Parses a string for a string flag, in the form of
-// "--flag=value".
-//
-// On success, stores the value of the flag in *value, and returns
-// true. On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, std::string* value);
-
-// Returns true if the string matches the flag.
-bool IsFlag(const char* str, const char* flag);
-
-// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
-// some non-alphanumeric character. As a special case, also returns true if
-// value is the empty string.
-bool IsTruthyFlagValue(const std::string& value);
-}  // end namespace benchmark
-
-#endif  // BENCHMARK_COMMANDLINEFLAGS_H_
Index: MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc
===================================================================
--- /dev/null
+++ MicroBenchmarks/libs/benchmark-1.2.0/src/commandlineflags.cc
@@ -1,218 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "commandlineflags.h"
-
-#include <cctype>
-#include <cstdlib>
-#include <cstring>
-#include <iostream>
-#include <limits>
-
-namespace benchmark {
-// Parses 'str' for a 32-bit signed integer. If successful, writes
-// the result to *value and returns true; otherwise leaves *value
-// unchanged and returns false.
-bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) {
-  // Parses the environment variable as a decimal integer.
-  char* end = nullptr;
-  const long long_value = strtol(str, &end, 10);  // NOLINT
-
-  // Has strtol() consumed all characters in the string?
-  if (*end != '\0') {
-    // No - an invalid character was encountered.
-    std::cerr << src_text << " is expected to be a 32-bit integer, "
-              << "but actually has value \"" << str << "\".\n";
-    return false;
-  }
-
-  // Is the parsed value in the range of an Int32?
-  const int32_t result = static_cast<int32_t>(long_value);
-  if (long_value == std::numeric_limits<long>::max() ||
-      long_value == std::numeric_limits<long>::min() ||
-      // The parsed value overflows as a long. (strtol() returns
-      // LONG_MAX or LONG_MIN when the input overflows.)
-      result != long_value
-      // The parsed value overflows as an Int32.
-      ) {
-    std::cerr << src_text << " is expected to be a 32-bit integer, "
-              << "but actually has value \"" << str << "\", "
-              << "which overflows.\n";
-    return false;
-  }
-
-  *value = result;
-  return true;
-}
-
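To make the parsing contract above concrete, here is a rough sketch of how these helpers could be driven from an argument loop. The flag names and the wiring are hypothetical; the real consumer of this header is the library's own initialization code:

```cpp
#include <iostream>

#include "commandlineflags.h"  // FLAG/DEFINE_* macros, Parse*Flag declarations

DEFINE_bool(list_tests, false, "list benchmarks instead of running them");
DEFINE_int32(repetitions, 1, "number of repetitions per benchmark");

int main(int argc, char** argv) {
  for (int i = 1; i < argc; ++i) {
    // Each helper is a no-op (returns false) unless argv[i] starts with
    // "--<flag>"; on a match it writes the parsed value through the pointer.
    if (benchmark::ParseBoolFlag(argv[i], "list_tests", &FLAG(list_tests)))
      continue;
    benchmark::ParseInt32Flag(argv[i], "repetitions", &FLAG(repetitions));
  }
  std::cout << FLAG(list_tests) << " " << FLAG(repetitions) << "\n";
  return 0;
}
```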
-bool ParseDouble(const std::string& src_text, const char* str, double* value) { - // Parses the environment variable as a decimal integer. - char* end = nullptr; - const double double_value = strtod(str, &end); // NOLINT - - // Has strtol() consumed all characters in the string? - if (*end != '\0') { - // No - an invalid character was encountered. - std::cerr << src_text << " is expected to be a double, " - << "but actually has value \"" << str << "\".\n"; - return false; - } - - *value = double_value; - return true; -} - -// Returns the name of the environment variable corresponding to the -// given flag. For example, FlagToEnvVar("foo") will return -// "BENCHMARK_FOO" in the open-source version. -static std::string FlagToEnvVar(const char* flag) { - const std::string flag_str(flag); - - std::string env_var; - for (size_t i = 0; i != flag_str.length(); ++i) - env_var += static_cast(::toupper(flag_str.c_str()[i])); - - return "BENCHMARK_" + env_var; -} - -// Reads and returns the Boolean environment variable corresponding to -// the given flag; if it's not set, returns default_value. -// -// The value is considered true iff it's not "0". -bool BoolFromEnv(const char* flag, bool default_value) { - const std::string env_var = FlagToEnvVar(flag); - const char* const string_value = getenv(env_var.c_str()); - return string_value == nullptr ? default_value - : strcmp(string_value, "0") != 0; -} - -// Reads and returns a 32-bit integer stored in the environment -// variable corresponding to the given flag; if it isn't set or -// doesn't represent a valid 32-bit integer, returns default_value. -int32_t Int32FromEnv(const char* flag, int32_t default_value) { - const std::string env_var = FlagToEnvVar(flag); - const char* const string_value = getenv(env_var.c_str()); - if (string_value == nullptr) { - // The environment variable is not set. - return default_value; - } - - int32_t result = default_value; - if (!ParseInt32(std::string("Environment variable ") + env_var, string_value, - &result)) { - std::cout << "The default value " << default_value << " is used.\n"; - return default_value; - } - - return result; -} - -// Reads and returns the string environment variable corresponding to -// the given flag; if it's not set, returns default_value. -const char* StringFromEnv(const char* flag, const char* default_value) { - const std::string env_var = FlagToEnvVar(flag); - const char* const value = getenv(env_var.c_str()); - return value == nullptr ? default_value : value; -} - -// Parses a string as a command line flag. The string should have -// the format "--flag=value". When def_optional is true, the "=value" -// part can be omitted. -// -// Returns the value of the flag, or nullptr if the parsing failed. -const char* ParseFlagValue(const char* str, const char* flag, - bool def_optional) { - // str and flag must not be nullptr. - if (str == nullptr || flag == nullptr) return nullptr; - - // The flag must start with "--". - const std::string flag_str = std::string("--") + std::string(flag); - const size_t flag_len = flag_str.length(); - if (strncmp(str, flag_str.c_str(), flag_len) != 0) return nullptr; - - // Skips the flag name. - const char* flag_end = str + flag_len; - - // When def_optional is true, it's OK to not have a "=value" part. - if (def_optional && (flag_end[0] == '\0')) return flag_end; - - // If def_optional is true and there are more characters after the - // flag name, or if def_optional is false, there must be a '=' after - // the flag name. 
- if (flag_end[0] != '=') return nullptr; - - // Returns the string after "=". - return flag_end + 1; -} - -bool ParseBoolFlag(const char* str, const char* flag, bool* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, true); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Converts the string value to a bool. - *value = IsTruthyFlagValue(value_str); - return true; -} - -bool ParseInt32Flag(const char* str, const char* flag, int32_t* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Sets *value to the value of the flag. - return ParseInt32(std::string("The value of flag --") + flag, value_str, - value); -} - -bool ParseDoubleFlag(const char* str, const char* flag, double* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - // Sets *value to the value of the flag. - return ParseDouble(std::string("The value of flag --") + flag, value_str, - value); -} - -bool ParseStringFlag(const char* str, const char* flag, std::string* value) { - // Gets the value of the flag as a string. - const char* const value_str = ParseFlagValue(str, flag, false); - - // Aborts if the parsing failed. - if (value_str == nullptr) return false; - - *value = value_str; - return true; -} - -bool IsFlag(const char* str, const char* flag) { - return (ParseFlagValue(str, flag, true) != nullptr); -} - -bool IsTruthyFlagValue(const std::string& value) { - if (value.empty()) return true; - char ch = value[0]; - return isalnum(ch) && - !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N'); -} -} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/counter.h @@ -1,26 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" - -namespace benchmark { - -// these counter-related functions are hidden to reduce API surface. -namespace internal { -void Finish(UserCounters *l, double time, double num_threads); -void Increment(UserCounters *l, UserCounters const& r); -bool SameNames(UserCounters const& l, UserCounters const& r); -} // end namespace internal - -} //end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/counter.cc @@ -1,68 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
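For context, the flag helpers removed above follow the Google Test convention: DEFINE_int32 introduces a FLAGS_<name> global, and ParseInt32Flag() fills it from a "--flag=value" argument. A minimal sketch of how a driver could wire them together; the flag name, include path, and main() below are illustrative assumptions, not part of this library:

    #include <cstdint>
    #include <iostream>
    #include "commandlineflags.h"  // assumed in-tree include path

    DEFINE_int32(repetitions, 1, "hypothetical flag for illustration");

    int main(int argc, char** argv) {
      for (int i = 1; i < argc; ++i) {
        // Fills FLAGS_repetitions when argv[i] is "--repetitions=<n>";
        // on malformed input it reports to stderr and keeps the default.
        benchmark::ParseInt32Flag(argv[i], "repetitions", &FLAG(repetitions));
      }
      std::cout << "repetitions = " << FLAG(repetitions) << "\n";
    }

Invoked as "./demo --repetitions=5", ParseFlagValue() strips the "--repetitions=" prefix and ParseInt32() validates the remainder before the global is updated.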
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "counter.h" - -namespace benchmark { -namespace internal { - -double Finish(Counter const& c, double cpu_time, double num_threads) { - double v = c.value; - if (c.flags & Counter::kIsRate) { - v /= cpu_time; - } - if (c.flags & Counter::kAvgThreads) { - v /= num_threads; - } - return v; -} - -void Finish(UserCounters *l, double cpu_time, double num_threads) { - for (auto &c : *l) { - c.second.value = Finish(c.second, cpu_time, num_threads); - } -} - -void Increment(UserCounters *l, UserCounters const& r) { - // add counters present in both or just in *l - for (auto &c : *l) { - auto it = r.find(c.first); - if (it != r.end()) { - c.second.value = c.second + it->second; - } - } - // add counters present in r, but not in *l - for (auto const &tc : r) { - auto it = l->find(tc.first); - if (it == l->end()) { - (*l)[tc.first] = tc.second; - } - } -} - -bool SameNames(UserCounters const& l, UserCounters const& r) { - if (&l == &r) return true; - if (l.size() != r.size()) { - return false; - } - for (auto const& c : l) { - if (r.find(c.first) == r.end()) { - return false; - } - } - return true; -} - -} // end namespace internal -} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/csv_reporter.cc @@ -1,149 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "benchmark/benchmark.h" -#include "complexity.h" - -#include -#include -#include -#include -#include -#include - -#include "string_util.h" -#include "timers.h" -#include "check.h" - -// File format reference: http://edoceo.com/utilitas/csv-file-format. 
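The quoting convention referenced above, and applied by PrintRunData() below, is the usual CSV rule: double any embedded double-quote character and wrap the field in quotes. A self-contained sketch of that rule, independent of the reporter class (the helper name is illustrative):

    #include <iostream>
    #include <string>

    // Escapes one CSV field: an embedded '"' becomes '""' and the whole
    // field is wrapped in double quotes, mirroring the ReplaceAll() usage
    // in the reporter below.
    static std::string CsvEscape(const std::string& field) {
      std::string out = "\"";
      for (char c : field) {
        if (c == '"') out += "\"\"";
        else out += c;
      }
      out += '"';
      return out;
    }

    int main() {
      std::cout << CsvEscape("say \"hi\"") << "\n";  // prints: "say ""hi"""
    }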
- -namespace benchmark { - -namespace { -std::vector elements = { - "name", "iterations", "real_time", "cpu_time", - "time_unit", "bytes_per_second", "items_per_second", "label", - "error_occurred", "error_message"}; -} // namespace - -bool CSVReporter::ReportContext(const Context& context) { - PrintBasicContext(&GetErrorStream(), context); - return true; -} - -void CSVReporter::ReportRuns(const std::vector & reports) { - std::ostream& Out = GetOutputStream(); - - if (!printed_header_) { - // save the names of all the user counters - for (const auto& run : reports) { - for (const auto& cnt : run.counters) { - user_counter_names_.insert(cnt.first); - } - } - - // print the header - for (auto B = elements.begin(); B != elements.end();) { - Out << *B++; - if (B != elements.end()) Out << ","; - } - for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) { - Out << ",\"" << *B++ << "\""; - } - Out << "\n"; - - printed_header_ = true; - } else { - // check that all the current counters are saved in the name set - for (const auto& run : reports) { - for (const auto& cnt : run.counters) { - CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end()) - << "All counters must be present in each run. " - << "Counter named \"" << cnt.first - << "\" was not in a run after being added to the header"; - } - } - } - - // print results for each run - for (const auto& run : reports) { - PrintRunData(run); - } - -} - -void CSVReporter::PrintRunData(const Run & run) { - std::ostream& Out = GetOutputStream(); - - // Field with embedded double-quote characters must be doubled and the field - // delimited with double-quotes. - std::string name = run.benchmark_name; - ReplaceAll(&name, "\"", "\"\""); - Out << '"' << name << "\","; - if (run.error_occurred) { - Out << std::string(elements.size() - 3, ','); - Out << "true,"; - std::string msg = run.error_message; - ReplaceAll(&msg, "\"", "\"\""); - Out << '"' << msg << "\"\n"; - return; - } - - // Do not print iteration on bigO and RMS report - if (!run.report_big_o && !run.report_rms) { - Out << run.iterations; - } - Out << ","; - - Out << run.GetAdjustedRealTime() << ","; - Out << run.GetAdjustedCPUTime() << ","; - - // Do not print timeLabel on bigO and RMS report - if (run.report_big_o) { - Out << GetBigOString(run.complexity); - } else if (!run.report_rms) { - Out << GetTimeUnitString(run.time_unit); - } - Out << ","; - - if (run.bytes_per_second > 0.0) { - Out << run.bytes_per_second; - } - Out << ","; - if (run.items_per_second > 0.0) { - Out << run.items_per_second; - } - Out << ","; - if (!run.report_label.empty()) { - // Field with embedded double-quote characters must be doubled and the field - // delimited with double-quotes. - std::string label = run.report_label; - ReplaceAll(&label, "\"", "\"\""); - Out << "\"" << label << "\""; - } - Out << ",,"; // for error_occurred and error_message - - // Print user counters - for (const auto &ucn : user_counter_names_) { - auto it = run.counters.find(ucn); - if(it == run.counters.end()) { - Out << ","; - } else { - Out << "," << it->second; - } - } - Out << '\n'; -} - -} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/cycleclock.h @@ -1,172 +0,0 @@ -// ---------------------------------------------------------------------- -// CycleClock -// A CycleClock tells you the current time in Cycles. 
The "time" -// is actually time since power-on. This is like time() but doesn't -// involve a system call and is much more precise. -// -// NOTE: Not all cpu/platform/kernel combinations guarantee that this -// clock increments at a constant rate or is synchronized across all logical -// cpus in a system. -// -// If you need the above guarantees, please consider using a different -// API. There are efforts to provide an interface which provides a millisecond -// granularity and implemented as a memory read. A memory read is generally -// cheaper than the CycleClock for many architectures. -// -// Also, in some out of order CPU implementations, the CycleClock is not -// serializing. So if you're trying to count at cycles granularity, your -// data might be inaccurate due to out of order instruction execution. -// ---------------------------------------------------------------------- - -#ifndef BENCHMARK_CYCLECLOCK_H_ -#define BENCHMARK_CYCLECLOCK_H_ - -#include - -#include "benchmark/benchmark.h" -#include "internal_macros.h" - -#if defined(BENCHMARK_OS_MACOSX) -#include -#endif -// For MSVC, we want to use '_asm rdtsc' when possible (since it works -// with even ancient MSVC compilers), and when not possible the -// __rdtsc intrinsic, declared in . Unfortunately, in some -// environments, and have conflicting -// declarations of some other intrinsics, breaking compilation. -// Therefore, we simply declare __rdtsc ourselves. See also -// http://connect.microsoft.com/VisualStudio/feedback/details/262047 -#if defined(COMPILER_MSVC) && !defined(_M_IX86) -extern "C" uint64_t __rdtsc(); -#pragma intrinsic(__rdtsc) -#endif - -#ifndef BENCHMARK_OS_WINDOWS -#include -#include -#endif - -#ifdef BENCHMARK_OS_EMSCRIPTEN -#include -#endif - -namespace benchmark { -// NOTE: only i386 and x86_64 have been well tested. -// PPC, sparc, alpha, and ia64 are based on -// http://peter.kuscsik.com/wordpress/?p=14 -// with modifications by m3b. See also -// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h -namespace cycleclock { -// This should return the number of cycles since power-on. Thread-safe. -inline BENCHMARK_ALWAYS_INLINE int64_t Now() { -#if defined(BENCHMARK_OS_MACOSX) - // this goes at the top because we need ALL Macs, regardless of - // architecture, to return the number of "mach time units" that - // have passed since startup. See sysinfo.cc where - // InitializeSystemInfo() sets the supposed cpu clock frequency of - // macs to the number of mach time units per second, not actual - // CPU clock frequency (which can change in the face of CPU - // frequency scaling). Also note that when the Mac sleeps, this - // counter pauses; it does not continue counting, nor does it - // reset to zero. - return mach_absolute_time(); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // this goes above x86-specific code because old versions of Emscripten - // define __x86_64__, although they have nothing to do with it. - return static_cast(emscripten_get_now() * 1e+6); -#elif defined(__i386__) - int64_t ret; - __asm__ volatile("rdtsc" : "=A"(ret)); - return ret; -#elif defined(__x86_64__) || defined(__amd64__) - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -#elif defined(__powerpc__) || defined(__ppc__) - // This returns a time-base, which is not always precisely a cycle-count. 
- int64_t tbl, tbu0, tbu1; - asm("mftbu %0" : "=r"(tbu0)); - asm("mftb %0" : "=r"(tbl)); - asm("mftbu %0" : "=r"(tbu1)); - tbl &= -static_cast(tbu0 == tbu1); - // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage) - return (tbu1 << 32) | tbl; -#elif defined(__sparc__) - int64_t tick; - asm(".byte 0x83, 0x41, 0x00, 0x00"); - asm("mov %%g1, %0" : "=r"(tick)); - return tick; -#elif defined(__ia64__) - int64_t itc; - asm("mov %0 = ar.itc" : "=r"(itc)); - return itc; -#elif defined(COMPILER_MSVC) && defined(_M_IX86) - // Older MSVC compilers (like 7.x) don't seem to support the - // __rdtsc intrinsic properly, so I prefer to use _asm instead - // when I know it will work. Otherwise, I'll use __rdtsc and hope - // the code is being compiled with a non-ancient compiler. - _asm rdtsc -#elif defined(COMPILER_MSVC) - return __rdtsc(); -#elif defined(BENCHMARK_OS_NACL) - // Native Client validator on x86/x86-64 allows RDTSC instructions, - // and this case is handled above. Native Client validator on ARM - // rejects MRC instructions (used in the ARM-specific sequence below), - // so we handle it here. Portable Native Client compiles to - // architecture-agnostic bytecode, which doesn't provide any - // cycle counter access mnemonics. - - // Native Client does not provide any API to access cycle counter. - // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday - // because is provides nanosecond resolution (which is noticable at - // least for PNaCl modules running on x86 Mac & Linux). - // Initialize to always return 0 if clock_gettime fails. - struct timespec ts = { 0, 0 }; - clock_gettime(CLOCK_MONOTONIC, &ts); - return static_cast(ts.tv_sec) * 1000000000 + ts.tv_nsec; -#elif defined(__aarch64__) - // System timer of ARMv8 runs at a different frequency than the CPU's. - // The frequency is fixed, typically in the range 1-50MHz. It can be - // read at CNTFRQ special register. We assume the OS has set up - // the virtual timer properly. - int64_t virtual_timer_value; - asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); - return virtual_timer_value; -#elif defined(__ARM_ARCH) - // V6 is the earliest arch that has a standard cyclecount - // Native Client validator doesn't allow MRC instructions. -#if (__ARM_ARCH >= 6) - uint32_t pmccntr; - uint32_t pmuseren; - uint32_t pmcntenset; - // Read the user mode perf monitor counter access permissions. - asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); - if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. - asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); - if (pmcntenset & 0x80000000ul) { // Is it counting? - asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); - // The counter is set up to count every 64th cycle - return static_cast(pmccntr) * 64; // Should optimize to << 6 - } - } -#endif - struct timeval tv; - gettimeofday(&tv, nullptr); - return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#elif defined(__mips__) - // mips apparently only allows rdtsc for superusers, so we fall - // back to gettimeofday. It's possible clock_gettime would be better. - struct timeval tv; - gettimeofday(&tv, nullptr); - return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; -#else -// The soft failover to a generic implementation is automatic only for ARM. -// For other platforms the developer is expected to make an attempt to create -// a fast implementation and use generic version if nothing better is available. 
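Whatever the architecture-specific implementation, cycleclock::Now() is meant to be consumed as deltas rather than absolute values, and as the notes above stress, those deltas are only comparable when the counter runs at a constant rate. A hedged usage sketch; the include path and the timed loop are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include "cycleclock.h"  // assumed in-tree include path

    int main() {
      const int64_t start = benchmark::cycleclock::Now();
      volatile double sink = 0;                     // keep the loop alive
      for (int i = 0; i < 1000; ++i) sink += i * 0.5;
      const int64_t ticks = benchmark::cycleclock::Now() - start;
      std::printf("elapsed ticks: %lld\n", static_cast<long long>(ticks));
    }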
-#error You need to define CycleTimer for your OS and CPU -#endif -} -} // end namespace cycleclock -} // end namespace benchmark - -#endif // BENCHMARK_CYCLECLOCK_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/internal_macros.h @@ -1,57 +0,0 @@ -#ifndef BENCHMARK_INTERNAL_MACROS_H_ -#define BENCHMARK_INTERNAL_MACROS_H_ - -#include "benchmark/benchmark.h" - -#ifndef __has_feature -#define __has_feature(x) 0 -#endif - -#if defined(__clang__) -#define COMPILER_CLANG -#elif defined(_MSC_VER) -#define COMPILER_MSVC -#elif defined(__GNUC__) -#define COMPILER_GCC -#endif - -#if __has_feature(cxx_attributes) -#define BENCHMARK_NORETURN [[noreturn]] -#elif defined(__GNUC__) -#define BENCHMARK_NORETURN __attribute__((noreturn)) -#elif defined(COMPILER_MSVC) -#define BENCHMARK_NORETURN __declspec(noreturn) -#else -#define BENCHMARK_NORETURN -#endif - -#if defined(__CYGWIN__) -#define BENCHMARK_OS_CYGWIN 1 -#elif defined(_WIN32) -#define BENCHMARK_OS_WINDOWS 1 -#elif defined(__APPLE__) -#include "TargetConditionals.h" - #if defined(TARGET_OS_MAC) - #define BENCHMARK_OS_MACOSX 1 - #if defined(TARGET_OS_IPHONE) - #define BENCHMARK_OS_IOS 1 - #endif - #endif -#elif defined(__FreeBSD__) -#define BENCHMARK_OS_FREEBSD 1 -#elif defined(__linux__) -#define BENCHMARK_OS_LINUX 1 -#elif defined(__native_client__) -#define BENCHMARK_OS_NACL 1 -#elif defined(EMSCRIPTEN) -#define BENCHMARK_OS_EMSCRIPTEN 1 -#elif defined(__rtems__) -#define BENCHMARK_OS_RTEMS 1 -#endif - -#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \ - && !defined(__EXCEPTIONS) -#define BENCHMARK_HAS_NO_EXCEPTIONS -#endif - -#endif // BENCHMARK_INTERNAL_MACROS_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/log.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/log.h @@ -1,73 +0,0 @@ -#ifndef BENCHMARK_LOG_H_ -#define BENCHMARK_LOG_H_ - -#include -#include - -#include "benchmark/benchmark.h" - -namespace benchmark { -namespace internal { - -typedef std::basic_ostream&(EndLType)(std::basic_ostream&); - -class LogType { - friend LogType& GetNullLogInstance(); - friend LogType& GetErrorLogInstance(); - - // FIXME: Add locking to output. 
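This logging shim routes everything through GetLogInstanceForLevel(): messages at or below the global LogLevel() reach std::clog, the rest go to a null sink. A sketch of driving it through the VLOG(x) macro defined at the end of this header; the include path is an assumption:

    #include <iostream>
    #include "log.h"  // assumed in-tree include path

    int main() {
      benchmark::internal::LogLevel() = 1;              // show levels <= 1
      VLOG(1) << "printed to std::clog" << std::endl;
      VLOG(2) << "swallowed by the null logger" << std::endl;
    }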
- template - friend LogType& operator<<(LogType&, Tp const&); - friend LogType& operator<<(LogType&, EndLType*); - - private: - LogType(std::ostream* out) : out_(out) {} - std::ostream* out_; - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType); -}; - -template -LogType& operator<<(LogType& log, Tp const& value) { - if (log.out_) { - *log.out_ << value; - } - return log; -} - -inline LogType& operator<<(LogType& log, EndLType* m) { - if (log.out_) { - *log.out_ << m; - } - return log; -} - -inline int& LogLevel() { - static int log_level = 0; - return log_level; -} - -inline LogType& GetNullLogInstance() { - static LogType log(nullptr); - return log; -} - -inline LogType& GetErrorLogInstance() { - static LogType log(&std::clog); - return log; -} - -inline LogType& GetLogInstanceForLevel(int level) { - if (level <= LogLevel()) { - return GetErrorLogInstance(); - } - return GetNullLogInstance(); -} - -} // end namespace internal -} // end namespace benchmark - -#define VLOG(x) \ - (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ - " ") - -#endif Index: MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/mutex.h @@ -1,155 +0,0 @@ -#ifndef BENCHMARK_MUTEX_H_ -#define BENCHMARK_MUTEX_H_ - -#include -#include - -#include "check.h" - -// Enable thread safety attributes only with clang. -// The attributes can be safely erased when compiling with other compilers. -#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES) -#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) -#else -#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op -#endif - -#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) - -#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) - -#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) - -#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) - -#define ACQUIRED_BEFORE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) - -#define ACQUIRED_AFTER(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) - -#define REQUIRES(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) - -#define REQUIRES_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) - -#define ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) - -#define ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) - -#define RELEASE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) - -#define RELEASE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) - -#define TRY_ACQUIRE_SHARED(...) \ - THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) - -#define EXCLUDES(...) 
THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) - -#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) - -#define ASSERT_SHARED_CAPABILITY(x) \ - THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) - -#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) - -#define NO_THREAD_SAFETY_ANALYSIS \ - THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) - -namespace benchmark { - -typedef std::condition_variable Condition; - -// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that -// we can annotate them with thread safety attributes and use the -// -Wthread-safety warning with clang. The standard library types cannot be -// used directly because they do not provided the required annotations. -class CAPABILITY("mutex") Mutex { - public: - Mutex() {} - - void lock() ACQUIRE() { mut_.lock(); } - void unlock() RELEASE() { mut_.unlock(); } - std::mutex& native_handle() { return mut_; } - - private: - std::mutex mut_; -}; - -class SCOPED_CAPABILITY MutexLock { - typedef std::unique_lock MutexLockImp; - - public: - MutexLock(Mutex& m) ACQUIRE(m) : ml_(m.native_handle()) {} - ~MutexLock() RELEASE() {} - MutexLockImp& native_handle() { return ml_; } - - private: - MutexLockImp ml_; -}; - -class Barrier { - public: - Barrier(int num_threads) : running_threads_(num_threads) {} - - // Called by each thread - bool wait() EXCLUDES(lock_) { - bool last_thread = false; - { - MutexLock ml(lock_); - last_thread = createBarrier(ml); - } - if (last_thread) phase_condition_.notify_all(); - return last_thread; - } - - void removeThread() EXCLUDES(lock_) { - MutexLock ml(lock_); - --running_threads_; - if (entered_ != 0) phase_condition_.notify_all(); - } - - private: - Mutex lock_; - Condition phase_condition_; - int running_threads_; - - // State for barrier management - int phase_number_ = 0; - int entered_ = 0; // Number of threads that have entered this barrier - - // Enter the barrier and wait until all other threads have also - // entered the barrier. Returns iff this is the last thread to - // enter the barrier. - bool createBarrier(MutexLock& ml) REQUIRES(lock_) { - CHECK_LT(entered_, running_threads_); - entered_++; - if (entered_ < running_threads_) { - // Wait for all threads to enter - int phase_number_cp = phase_number_; - auto cb = [this, phase_number_cp]() { - return this->phase_number_ > phase_number_cp || - entered_ == running_threads_; // A thread has aborted in error - }; - phase_condition_.wait(ml.native_handle(), cb); - if (phase_number_ > phase_number_cp) return false; - // else (running_threads_ == entered_) and we are the last thread. - } - // Last thread has reached the barrier - phase_number_++; - entered_ = 0; - return true; - } -}; - -} // end namespace benchmark - -#endif // BENCHMARK_MUTEX_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/re.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/re.h @@ -1,140 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
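The Barrier above gives benchmark threads a phase-based rendezvous: every caller blocks in wait() until the last thread arrives, and only that last thread sees a true return. A minimal sketch of that contract; the thread count, output, and include path are illustrative:

    #include <cstdio>
    #include <thread>
    #include <vector>
    #include "mutex.h"  // assumed in-tree include path

    int main() {
      const int kThreads = 4;
      benchmark::Barrier barrier(kThreads);
      std::vector<std::thread> pool;
      for (int i = 0; i < kThreads; ++i) {
        pool.emplace_back([&barrier, i] {
          if (barrier.wait())  // true only in the last arriving thread
            std::printf("thread %d completed the phase\n", i);
        });
      }
      for (auto& t : pool) t.join();
    }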
-// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef BENCHMARK_RE_H_ -#define BENCHMARK_RE_H_ - -#include "internal_macros.h" - -// Prefer C regex libraries when compiling w/o exceptions so that we can -// correctly report errors. -#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && defined(HAVE_STD_REGEX) && \ - (defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX)) -#undef HAVE_STD_REGEX -#endif - -#if defined(HAVE_STD_REGEX) -#include -#elif defined(HAVE_GNU_POSIX_REGEX) -#include -#elif defined(HAVE_POSIX_REGEX) -#include -#else -#error No regular expression backend was found! -#endif -#include - -#include "check.h" - -namespace benchmark { - -// A wrapper around the POSIX regular expression API that provides automatic -// cleanup -class Regex { - public: - Regex() : init_(false) {} - - ~Regex(); - - // Compile a regular expression matcher from spec. Returns true on success. - // - // On failure (and if error is not nullptr), error is populated with a human - // readable error message if an error occurs. - bool Init(const std::string& spec, std::string* error); - - // Returns whether str matches the compiled regular expression. - bool Match(const std::string& str); - - private: - bool init_; -// Underlying regular expression object -#if defined(HAVE_STD_REGEX) - std::regex re_; -#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) - regex_t re_; -#else -#error No regular expression backend implementation available -#endif -}; - -#if defined(HAVE_STD_REGEX) - -inline bool Regex::Init(const std::string& spec, std::string* error) { -#ifdef BENCHMARK_HAS_NO_EXCEPTIONS - ((void)error); // suppress unused warning -#else - try { -#endif - re_ = std::regex(spec, std::regex_constants::extended); - init_ = true; -#ifndef BENCHMARK_HAS_NO_EXCEPTIONS - } catch (const std::regex_error& e) { - if (error) { - *error = e.what(); - } - } -#endif - return init_; -} - -inline Regex::~Regex() {} - -inline bool Regex::Match(const std::string& str) { - if (!init_) { - return false; - } - return std::regex_search(str, re_); -} - -#else -inline bool Regex::Init(const std::string& spec, std::string* error) { - int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB); - if (ec != 0) { - if (error) { - size_t needed = regerror(ec, &re_, nullptr, 0); - char* errbuf = new char[needed]; - regerror(ec, &re_, errbuf, needed); - - // regerror returns the number of bytes necessary to null terminate - // the string, so we move that when assigning to error. 
- CHECK_NE(needed, 0); - error->assign(errbuf, needed - 1); - - delete[] errbuf; - } - - return false; - } - - init_ = true; - return true; -} - -inline Regex::~Regex() { - if (init_) { - regfree(&re_); - } -} - -inline bool Regex::Match(const std::string& str) { - if (!init_) { - return false; - } - return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0; -} -#endif - -} // end namespace benchmark - -#endif // BENCHMARK_RE_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.h @@ -1,15 +0,0 @@ -#ifndef BENCHMARK_SLEEP_H_ -#define BENCHMARK_SLEEP_H_ - -namespace benchmark { -const int kNumMillisPerSecond = 1000; -const int kNumMicrosPerMilli = 1000; -const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000; -const int kNumNanosPerMicro = 1000; -const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond; - -void SleepForMilliseconds(int milliseconds); -void SleepForSeconds(double seconds); -} // end namespace benchmark - -#endif // BENCHMARK_SLEEP_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sleep.cc @@ -1,51 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "sleep.h" - -#include -#include -#include - -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#endif - -namespace benchmark { -#ifdef BENCHMARK_OS_WINDOWS -// Window's Sleep takes milliseconds argument. -void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } -void SleepForSeconds(double seconds) { - SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); -} -#else // BENCHMARK_OS_WINDOWS -void SleepForMicroseconds(int microseconds) { - struct timespec sleep_time; - sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; - sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; - while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) - ; // Ignore signals and wait for the full interval to elapse. 
-} - -void SleepForMilliseconds(int milliseconds) { - SleepForMicroseconds(milliseconds * kNumMicrosPerMilli); -} - -void SleepForSeconds(double seconds) { - SleepForMicroseconds(static_cast(seconds * kNumMicrosPerSecond)); -} -#endif // BENCHMARK_OS_WINDOWS -} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.2.0/src/stat.h +++ /dev/null @@ -1,310 +0,0 @@ -#ifndef BENCHMARK_STAT_H_ -#define BENCHMARK_STAT_H_ - -#include -#include -#include -#include - -namespace benchmark { - -template -class Stat1; - -template -class Stat1MinMax; - -typedef Stat1 Stat1_f; -typedef Stat1 Stat1_d; -typedef Stat1MinMax Stat1MinMax_f; -typedef Stat1MinMax Stat1MinMax_d; - -template -class Vector2; -template -class Vector3; -template -class Vector4; - -template -class Stat1 { - public: - typedef Stat1 Self; - - Stat1() { Clear(); } - // Create a sample of value dat and weight 1 - explicit Stat1(const VType &dat) { - sum_ = dat; - sum_squares_ = Sqr(dat); - numsamples_ = 1; - } - // Create statistics for all the samples between begin (included) - // and end(excluded) - explicit Stat1(const VType *begin, const VType *end) { - Clear(); - for (const VType *item = begin; item < end; ++item) { - (*this) += Stat1(*item); - } - } - // Create a sample of value dat and weight w - Stat1(const VType &dat, const NumType &w) { - sum_ = w * dat; - sum_squares_ = w * Sqr(dat); - numsamples_ = w; - } - // Copy operator - Stat1(const Self &stat) { - sum_ = stat.sum_; - sum_squares_ = stat.sum_squares_; - numsamples_ = stat.numsamples_; - } - - void Clear() { - numsamples_ = NumType(); - sum_squares_ = sum_ = VType(); - } - - Self &operator=(const Self &stat) { - sum_ = stat.sum_; - sum_squares_ = stat.sum_squares_; - numsamples_ = stat.numsamples_; - return (*this); - } - // Merge statistics from two sample sets. - Self &operator+=(const Self &stat) { - sum_ += stat.sum_; - sum_squares_ += stat.sum_squares_; - numsamples_ += stat.numsamples_; - return (*this); - } - // The operation opposite to += - Self &operator-=(const Self &stat) { - sum_ -= stat.sum_; - sum_squares_ -= stat.sum_squares_; - numsamples_ -= stat.numsamples_; - return (*this); - } - // Multiply the weight of the set of samples by a factor k - Self &operator*=(const VType &k) { - sum_ *= k; - sum_squares_ *= k; - numsamples_ *= k; - return (*this); - } - - // Merge statistics from two sample sets. - Self operator+(const Self &stat) const { return Self(*this) += stat; } - - // The operation opposite to + - Self operator-(const Self &stat) const { return Self(*this) -= stat; } - - // Multiply the weight of the set of samples by a factor k - Self operator*(const VType &k) const { return Self(*this) *= k; } - - // Return the total weight of this sample set - NumType numSamples() const { return numsamples_; } - - // Return the sum of this sample set - VType Sum() const { return sum_; } - - // Return the mean of this sample set - VType Mean() const { - if (numsamples_ == 0) return VType(); - return sum_ * (1.0 / numsamples_); - } - - // Return the mean of this sample set and compute the standard deviation at - // the same time. 
- VType Mean(VType *stddev) const { - if (numsamples_ == 0) return VType(); - VType mean = sum_ * (1.0 / numsamples_); - if (stddev) { - // Sample standard deviation is undefined for n = 1 - if (numsamples_ == 1) { - *stddev = VType(); - } else { - VType avg_squares = sum_squares_ * (1.0 / numsamples_); - *stddev = Sqrt(numsamples_ / (numsamples_ - 1.0) * (avg_squares - Sqr(mean))); - } - } - return mean; - } - - // Return the standard deviation of the sample set - VType StdDev() const { - VType stddev = VType(); - Mean(&stddev); - return stddev; - } - - private: - static_assert(std::is_integral::value && - !std::is_same::value, - "NumType must be an integral type that is not bool."); - // Let i be the index of the samples provided (using +=) - // and weight[i],value[i] be the data of sample #i - // then the variables have the following meaning: - NumType numsamples_; // sum of weight[i]; - VType sum_; // sum of weight[i]*value[i]; - VType sum_squares_; // sum of weight[i]*value[i]^2; - - // Template function used to square a number. - // For a vector we square all components - template - static inline SType Sqr(const SType &dat) { - return dat * dat; - } - - template - static inline Vector2 Sqr(const Vector2 &dat) { - return dat.MulComponents(dat); - } - - template - static inline Vector3 Sqr(const Vector3 &dat) { - return dat.MulComponents(dat); - } - - template - static inline Vector4 Sqr(const Vector4 &dat) { - return dat.MulComponents(dat); - } - - // Template function used to take the square root of a number. - // For a vector we square all components - template - static inline SType Sqrt(const SType &dat) { - // Avoid NaN due to imprecision in the calculations - if (dat < 0) return 0; - return sqrt(dat); - } - - template - static inline Vector2 Sqrt(const Vector2 &dat) { - // Avoid NaN due to imprecision in the calculations - return Max(dat, Vector2()).Sqrt(); - } - - template - static inline Vector3 Sqrt(const Vector3 &dat) { - // Avoid NaN due to imprecision in the calculations - return Max(dat, Vector3()).Sqrt(); - } - - template - static inline Vector4 Sqrt(const Vector4 &dat) { - // Avoid NaN due to imprecision in the calculations - return Max(dat, Vector4()).Sqrt(); - } -}; - -// Useful printing function -template -std::ostream &operator<<(std::ostream &out, const Stat1 &s) { - out << "{ avg = " << s.Mean() << " std = " << s.StdDev() - << " nsamples = " << s.NumSamples() << "}"; - return out; -} - -// Stat1MinMax: same as Stat1, but it also -// keeps the Min and Max values; the "-" -// operator is disabled because it cannot be implemented -// efficiently -template -class Stat1MinMax : public Stat1 { - public: - typedef Stat1MinMax Self; - - Stat1MinMax() { Clear(); } - // Create a sample of value dat and weight 1 - explicit Stat1MinMax(const VType &dat) : Stat1(dat) { - max_ = dat; - min_ = dat; - } - // Create statistics for all the samples between begin (included) - // and end(excluded) - explicit Stat1MinMax(const VType *begin, const VType *end) { - Clear(); - for (const VType *item = begin; item < end; ++item) { - (*this) += Stat1MinMax(*item); - } - } - // Create a sample of value dat and weight w - Stat1MinMax(const VType &dat, const NumType &w) - : Stat1(dat, w) { - max_ = dat; - min_ = dat; - } - // Copy operator - Stat1MinMax(const Self &stat) : Stat1(stat) { - max_ = stat.max_; - min_ = stat.min_; - } - - void Clear() { - Stat1::Clear(); - if (std::numeric_limits::has_infinity) { - min_ = std::numeric_limits::infinity(); - max_ = -std::numeric_limits::infinity(); - } 
else { - min_ = std::numeric_limits::max(); - max_ = std::numeric_limits::min(); - } - } - - Self &operator=(const Self &stat) { - this->Stat1::operator=(stat); - max_ = stat.max_; - min_ = stat.min_; - return (*this); - } - // Merge statistics from two sample sets. - Self &operator+=(const Self &stat) { - this->Stat1::operator+=(stat); - if (stat.max_ > max_) max_ = stat.max_; - if (stat.min_ < min_) min_ = stat.min_; - return (*this); - } - // Multiply the weight of the set of samples by a factor k - Self &operator*=(const VType &stat) { - this->Stat1::operator*=(stat); - return (*this); - } - // Merge statistics from two sample sets. - Self operator+(const Self &stat) const { return Self(*this) += stat; } - // Multiply the weight of the set of samples by a factor k - Self operator*(const VType &k) const { return Self(*this) *= k; } - - // Return the maximal value in this sample set - VType Max() const { return max_; } - // Return the minimal value in this sample set - VType Min() const { return min_; } - - private: - // The - operation makes no sense with Min/Max - // unless we keep the full list of values (but we don't) - // make it private, and let it undefined so nobody can call it - Self &operator-=(const Self &stat); // senseless. let it undefined. - - // The operation opposite to - - Self operator-(const Self &stat) const; // senseless. let it undefined. - - // Let i be the index of the samples provided (using +=) - // and weight[i],value[i] be the data of sample #i - // then the variables have the following meaning: - VType max_; // max of value[i] - VType min_; // min of value[i] -}; - -// Useful printing function -template -std::ostream &operator<<(std::ostream &out, - const Stat1MinMax &s) { - out << "{ avg = " << s.Mean() << " std = " << s.StdDev() - << " nsamples = " << s.NumSamples() << " min = " << s.Min() - << " max = " << s.Max() << "}"; - return out; -} -} // end namespace benchmark - -#endif // BENCHMARK_STAT_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.h @@ -1,10 +0,0 @@ -#ifndef BENCHMARK_SYSINFO_H_ -#define BENCHMARK_SYSINFO_H_ - -namespace benchmark { -int NumCPUs(); -double CyclesPerSecond(); -bool CpuScalingEnabled(); -} // end namespace benchmark - -#endif // BENCHMARK_SYSINFO_H_ Index: MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/sysinfo.cc @@ -1,355 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
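The Stat1 accumulator above reduces a sample stream to three running quantities (weighted sum, sum of squares, and total weight); mean, the n/(n-1) corrected sample deviation, and the negative-variance clamp in Sqrt() all derive from them. A self-contained restatement of that arithmetic, stripped of the vector and weighting machinery (the struct name is illustrative):

    #include <cmath>
    #include <cstdio>

    struct Stat1Sketch {
      double sum = 0, sum_squares = 0, n = 0;
      void Add(double v, double w = 1.0) {
        sum += w * v;
        sum_squares += w * v * v;
        n += w;
      }
      double Mean() const { return n == 0 ? 0 : sum / n; }
      double StdDev() const {
        if (n <= 1) return 0;  // sample stddev undefined for n = 1
        const double mean = Mean();
        const double avg_sq = sum_squares / n;
        const double var = n / (n - 1.0) * (avg_sq - mean * mean);
        return var < 0 ? 0 : std::sqrt(var);  // clamp roundoff negatives
      }
    };

    int main() {
      Stat1Sketch s;
      for (double v : {1.0, 2.0, 4.0}) s.Add(v);
      std::printf("mean=%f stddev=%f\n", s.Mean(), s.StdDev());
    }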
- -#include "sysinfo.h" -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#include -#include -#else -#include -#include -#include -#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD -#include -#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX -#include -#endif -#endif - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "arraysize.h" -#include "check.h" -#include "cycleclock.h" -#include "internal_macros.h" -#include "log.h" -#include "sleep.h" -#include "string_util.h" - -namespace benchmark { -namespace { -std::once_flag cpuinfo_init; -double cpuinfo_cycles_per_second = 1.0; -int cpuinfo_num_cpus = 1; // Conservative guess - -#if !defined BENCHMARK_OS_MACOSX -const int64_t estimate_time_ms = 1000; - -// Helper function estimates cycles/sec by observing cycles elapsed during -// sleep(). Using small sleep time decreases accuracy significantly. -int64_t EstimateCyclesPerSecond() { - const int64_t start_ticks = cycleclock::Now(); - SleepForMilliseconds(estimate_time_ms); - return cycleclock::Now() - start_ticks; -} -#endif - -#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN -// Helper function for reading an int from a file. Returns true if successful -// and the memory location pointed to by value is set to the value read. -bool ReadIntFromFile(const char* file, long* value) { - bool ret = false; - int fd = open(file, O_RDONLY); - if (fd != -1) { - char line[1024]; - char* err; - memset(line, '\0', sizeof(line)); - ssize_t read_err = read(fd, line, sizeof(line) - 1); - ((void)read_err); // prevent unused warning - CHECK(read_err >= 0); - const long temp_value = strtol(line, &err, 10); - if (line[0] != '\0' && (*err == '\n' || *err == '\0')) { - *value = temp_value; - ret = true; - } - close(fd); - } - return ret; -} -#endif - -#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN -static std::string convertToLowerCase(std::string s) { - for (auto& ch : s) - ch = std::tolower(ch); - return s; -} -static bool startsWithKey(std::string Value, std::string Key, - bool IgnoreCase = true) { - if (IgnoreCase) { - Key = convertToLowerCase(std::move(Key)); - Value = convertToLowerCase(std::move(Value)); - } - return Value.compare(0, Key.size(), Key) == 0; -} -#endif - -void InitializeSystemInfo() { -#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN - char line[1024]; - char* err; - long freq; - - bool saw_mhz = false; - - // If the kernel is exporting the tsc frequency use that. There are issues - // where cpuinfo_max_freq cannot be relied on because the BIOS may be - // exporintg an invalid p-state (on x86) or p-states may be used to put the - // processor in a new mode (turbo mode). Essentially, those frequencies - // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as - // well. - if (!saw_mhz && - ReadIntFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) { - // The value is in kHz (as the file name suggests). For example, on a - // 2GHz warpstation, the file contains the value "2000000". - cpuinfo_cycles_per_second = freq * 1000.0; - saw_mhz = true; - } - - // If CPU scaling is in effect, we want to use the *maximum* frequency, - // not whatever CPU speed some random processor happens to be using now. - if (!saw_mhz && - ReadIntFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", - &freq)) { - // The value is in kHz. 
For example, on a 2GHz warpstation, the file - // contains the value "2000000". - cpuinfo_cycles_per_second = freq * 1000.0; - saw_mhz = true; - } - - // Read /proc/cpuinfo for other values, and if there is no cpuinfo_max_freq. - const char* pname = "/proc/cpuinfo"; - int fd = open(pname, O_RDONLY); - if (fd == -1) { - perror(pname); - if (!saw_mhz) { - cpuinfo_cycles_per_second = - static_cast(EstimateCyclesPerSecond()); - } - return; - } - - double bogo_clock = 1.0; - bool saw_bogo = false; - long max_cpu_id = 0; - int num_cpus = 0; - line[0] = line[1] = '\0'; - size_t chars_read = 0; - do { // we'll exit when the last read didn't read anything - // Move the next line to the beginning of the buffer - const size_t oldlinelen = strlen(line); - if (sizeof(line) == oldlinelen + 1) // oldlinelen took up entire line - line[0] = '\0'; - else // still other lines left to save - memmove(line, line + oldlinelen + 1, sizeof(line) - (oldlinelen + 1)); - // Terminate the new line, reading more if we can't find the newline - char* newline = strchr(line, '\n'); - if (newline == nullptr) { - const size_t linelen = strlen(line); - const size_t bytes_to_read = sizeof(line) - 1 - linelen; - CHECK(bytes_to_read > 0); // because the memmove recovered >=1 bytes - chars_read = read(fd, line + linelen, bytes_to_read); - line[linelen + chars_read] = '\0'; - newline = strchr(line, '\n'); - } - if (newline != nullptr) *newline = '\0'; - - // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only - // accept postive values. Some environments (virtual machines) report zero, - // which would cause infinite looping in WallTime_Init. - if (!saw_mhz && startsWithKey(line, "cpu MHz")) { - const char* freqstr = strchr(line, ':'); - if (freqstr) { - cpuinfo_cycles_per_second = strtod(freqstr + 1, &err) * 1000000.0; - if (freqstr[1] != '\0' && *err == '\0' && cpuinfo_cycles_per_second > 0) - saw_mhz = true; - } - } else if (startsWithKey(line, "bogomips")) { - const char* freqstr = strchr(line, ':'); - if (freqstr) { - bogo_clock = strtod(freqstr + 1, &err) * 1000000.0; - if (freqstr[1] != '\0' && *err == '\0' && bogo_clock > 0) - saw_bogo = true; - } - } else if (startsWithKey(line, "processor", /*IgnoreCase*/false)) { - // The above comparison is case-sensitive because ARM kernels often - // include a "Processor" line that tells you about the CPU, distinct - // from the usual "processor" lines that give you CPU ids. No current - // Linux architecture is using "Processor" for CPU ids. - num_cpus++; // count up every time we see an "processor :" entry - const char* id_str = strchr(line, ':'); - if (id_str) { - const long cpu_id = strtol(id_str + 1, &err, 10); - if (id_str[1] != '\0' && *err == '\0' && max_cpu_id < cpu_id) - max_cpu_id = cpu_id; - } - } - } while (chars_read > 0); - close(fd); - - if (!saw_mhz) { - if (saw_bogo) { - // If we didn't find anything better, we'll use bogomips, but - // we're not happy about it. - cpuinfo_cycles_per_second = bogo_clock; - } else { - // If we don't even have bogomips, we'll use the slow estimation. - cpuinfo_cycles_per_second = - static_cast(EstimateCyclesPerSecond()); - } - } - if (num_cpus == 0) { - fprintf(stderr, "Failed to read num. CPUs correctly from /proc/cpuinfo\n"); - } else { - if ((max_cpu_id + 1) != num_cpus) { - fprintf(stderr, - "CPU ID assignments in /proc/cpuinfo seem messed up." 
- " This is usually caused by a bad BIOS.\n"); - } - cpuinfo_num_cpus = num_cpus; - } - -#elif defined BENCHMARK_OS_FREEBSD -// For this sysctl to work, the machine must be configured without -// SMP, APIC, or APM support. hz should be 64-bit in freebsd 7.0 -// and later. Before that, it's a 32-bit quantity (and gives the -// wrong answer on machines faster than 2^32 Hz). See -// http://lists.freebsd.org/pipermail/freebsd-i386/2004-November/001846.html -// But also compare FreeBSD 7.0: -// http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG70#L223 -// 231 error = sysctl_handle_quad(oidp, &freq, 0, req); -// To FreeBSD 6.3 (it's the same in 6-STABLE): -// http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG6#L131 -// 139 error = sysctl_handle_int(oidp, &freq, sizeof(freq), req); -#if __FreeBSD__ >= 7 - uint64_t hz = 0; -#else - unsigned int hz = 0; -#endif - size_t sz = sizeof(hz); - const char* sysctl_path = "machdep.tsc_freq"; - if (sysctlbyname(sysctl_path, &hz, &sz, nullptr, 0) != 0) { - fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", - sysctl_path, strerror(errno)); - cpuinfo_cycles_per_second = static_cast(EstimateCyclesPerSecond()); - } else { - cpuinfo_cycles_per_second = hz; - } -// TODO: also figure out cpuinfo_num_cpus - -#elif defined BENCHMARK_OS_WINDOWS - // In NT, read MHz from the registry. If we fail to do so or we're in win9x - // then make a crude estimate. - DWORD data, data_size = sizeof(data); - if (IsWindowsXPOrGreater() && - SUCCEEDED( - SHGetValueA(HKEY_LOCAL_MACHINE, - "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", - "~MHz", nullptr, &data, &data_size))) - cpuinfo_cycles_per_second = - static_cast((int64_t)data * (int64_t)(1000 * 1000)); // was mhz - else - cpuinfo_cycles_per_second = static_cast(EstimateCyclesPerSecond()); - - SYSTEM_INFO sysinfo; - // Use memset as opposed to = {} to avoid GCC missing initializer false - // positives. - std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO)); - GetSystemInfo(&sysinfo); - cpuinfo_num_cpus = sysinfo.dwNumberOfProcessors; // number of logical - // processors in the current - // group - -#elif defined BENCHMARK_OS_MACOSX - int32_t num_cpus = 0; - size_t size = sizeof(num_cpus); - if (::sysctlbyname("hw.ncpu", &num_cpus, &size, nullptr, 0) == 0 && - (size == sizeof(num_cpus))) { - cpuinfo_num_cpus = num_cpus; - } else { - fprintf(stderr, "%s\n", strerror(errno)); - std::exit(EXIT_FAILURE); - } - int64_t cpu_freq = 0; - size = sizeof(cpu_freq); - if (::sysctlbyname("hw.cpufrequency", &cpu_freq, &size, nullptr, 0) == 0 && - (size == sizeof(cpu_freq))) { - cpuinfo_cycles_per_second = cpu_freq; - } else { - #if defined BENCHMARK_OS_IOS - fprintf(stderr, "CPU frequency cannot be detected. \n"); - cpuinfo_cycles_per_second = 0; - #else - fprintf(stderr, "%s\n", strerror(errno)); - std::exit(EXIT_FAILURE); - #endif - } -#else - // Generic cycles per second counter - cpuinfo_cycles_per_second = static_cast(EstimateCyclesPerSecond()); -#endif -} - -} // end namespace - -double CyclesPerSecond(void) { - std::call_once(cpuinfo_init, InitializeSystemInfo); - return cpuinfo_cycles_per_second; -} - -int NumCPUs(void) { - std::call_once(cpuinfo_init, InitializeSystemInfo); - return cpuinfo_num_cpus; -} - -// The ""'s catch people who don't pass in a literal for "str" -#define strliterallen(str) (sizeof("" str "") - 1) - -// Must use a string literal for prefix. -#define memprefix(str, len, prefix) \ - ((((len) >= strliterallen(prefix)) && \ - std::memcmp(str, prefix, strliterallen(prefix)) == 0) \ - ? 
str + strliterallen(prefix) \ - : nullptr) - -bool CpuScalingEnabled() { -#ifndef BENCHMARK_OS_WINDOWS - // On Linux, the CPUfreq subsystem exposes CPU information as files on the - // local file system. If reading the exported files fails, then we may not be - // running on Linux, so we silently ignore all the read errors. - for (int cpu = 0, num_cpus = NumCPUs(); cpu < num_cpus; ++cpu) { - std::string governor_file = - StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor"); - FILE* file = fopen(governor_file.c_str(), "r"); - if (!file) break; - char buff[16]; - size_t bytes_read = fread(buff, 1, sizeof(buff), file); - fclose(file); - if (memprefix(buff, bytes_read, "performance") == nullptr) return true; - } -#endif - return false; -} - -} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/timers.h @@ -1,48 +0,0 @@ -#ifndef BENCHMARK_TIMERS_H -#define BENCHMARK_TIMERS_H - -#include -#include - -namespace benchmark { - -// Return the CPU usage of the current process -double ProcessCPUUsage(); - -// Return the CPU usage of the children of the current process -double ChildrenCPUUsage(); - -// Return the CPU usage of the current thread -double ThreadCPUUsage(); - -#if defined(HAVE_STEADY_CLOCK) -template -struct ChooseSteadyClock { - typedef std::chrono::high_resolution_clock type; -}; - -template <> -struct ChooseSteadyClock { - typedef std::chrono::steady_clock type; -}; -#endif - -struct ChooseClockType { -#if defined(HAVE_STEADY_CLOCK) - typedef ChooseSteadyClock<>::type type; -#else - typedef std::chrono::high_resolution_clock type; -#endif -}; - -inline double ChronoClockNow() { - typedef ChooseClockType::type ClockType; - using FpSeconds = std::chrono::duration; - return FpSeconds(ClockType::now().time_since_epoch()).count(); -} - -std::string LocalDateTimeString(); - -} // end namespace benchmark - -#endif // BENCHMARK_TIMERS_H Index: MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/src/timers.cc @@ -1,212 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
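timers.h above selects std::chrono::steady_clock when HAVE_STEADY_CLOCK is defined (falling back to high_resolution_clock) and converts time_since_epoch directly into floating-point seconds. The same pattern with the trait machinery removed and steady_clock hardcoded for brevity:

    #include <chrono>
    #include <cstdio>

    int main() {
      using FpSeconds =
          std::chrono::duration<double, std::chrono::seconds::period>;
      const double t0 =
          FpSeconds(std::chrono::steady_clock::now().time_since_epoch()).count();
      volatile double sink = 0;
      for (int i = 0; i < 1000000; ++i) sink += i;   // timed region
      const double t1 =
          FpSeconds(std::chrono::steady_clock::now().time_since_epoch()).count();
      std::printf("elapsed: %.9f s\n", t1 - t0);
    }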
- -#include "timers.h" -#include "internal_macros.h" - -#ifdef BENCHMARK_OS_WINDOWS -#include -#include -#include -#else -#include -#include -#include -#include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD -#include -#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX -#include -#endif -#if defined(BENCHMARK_OS_MACOSX) -#include -#include -#include -#endif -#endif - -#ifdef BENCHMARK_OS_EMSCRIPTEN -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "check.h" -#include "log.h" -#include "sleep.h" -#include "string_util.h" - -namespace benchmark { - -// Suppress unused warnings on helper functions. -#if defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wunused-function" -#endif - -namespace { -#if defined(BENCHMARK_OS_WINDOWS) -double MakeTime(FILETIME const& kernel_time, FILETIME const& user_time) { - ULARGE_INTEGER kernel; - ULARGE_INTEGER user; - kernel.HighPart = kernel_time.dwHighDateTime; - kernel.LowPart = kernel_time.dwLowDateTime; - user.HighPart = user_time.dwHighDateTime; - user.LowPart = user_time.dwLowDateTime; - return (static_cast(kernel.QuadPart) + - static_cast(user.QuadPart)) * - 1e-7; -} -#else -double MakeTime(struct rusage const& ru) { - return (static_cast(ru.ru_utime.tv_sec) + - static_cast(ru.ru_utime.tv_usec) * 1e-6 + - static_cast(ru.ru_stime.tv_sec) + - static_cast(ru.ru_stime.tv_usec) * 1e-6); -} -#endif -#if defined(BENCHMARK_OS_MACOSX) -double MakeTime(thread_basic_info_data_t const& info) { - return (static_cast(info.user_time.seconds) + - static_cast(info.user_time.microseconds) * 1e-6 + - static_cast(info.system_time.seconds) + - static_cast(info.system_time.microseconds) * 1e-6); -} -#endif -#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID) -double MakeTime(struct timespec const& ts) { - return ts.tv_sec + (static_cast(ts.tv_nsec) * 1e-9); -} -#endif - -BENCHMARK_NORETURN static void DiagnoseAndExit(const char* msg) { - std::cerr << "ERROR: " << msg << std::endl; - std::exit(EXIT_FAILURE); -} - -} // end namespace - -double ProcessCPUUsage() { -#if defined(BENCHMARK_OS_WINDOWS) - HANDLE proc = GetCurrentProcess(); - FILETIME creation_time; - FILETIME exit_time; - FILETIME kernel_time; - FILETIME user_time; - if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, - &user_time)) - return MakeTime(kernel_time, user_time); - DiagnoseAndExit("GetProccessTimes() failed"); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. - // Use Emscripten-specific API. Reported CPU time would be exactly the - // same as total time, but this is ok because there aren't long-latency - // syncronous system calls in Emscripten. - return emscripten_get_now() * 1e-3; -#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 - struct timespec spec; - if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) - return MakeTime(spec); - DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed"); -#else - struct rusage ru; - if (getrusage(RUSAGE_SELF, &ru) == 0) return MakeTime(ru); - DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) 
failed"); -#endif -} - -double ThreadCPUUsage() { -#if defined(BENCHMARK_OS_WINDOWS) - HANDLE this_thread = GetCurrentThread(); - FILETIME creation_time; - FILETIME exit_time; - FILETIME kernel_time; - FILETIME user_time; - GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, - &user_time); - return MakeTime(kernel_time, user_time); -#elif defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 - mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; - thread_basic_info_data_t info; - mach_port_t thread = pthread_mach_thread_np(pthread_self()); - if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == - KERN_SUCCESS) { - return MakeTime(info); - } - DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); -#elif defined(BENCHMARK_OS_EMSCRIPTEN) - // Emscripten doesn't support traditional threads - return ProcessCPUUsage(); -#elif defined(BENCHMARK_OS_RTEMS) - // RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See - // https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c - return ProcessCPUUsage(); -#elif defined(CLOCK_THREAD_CPUTIME_ID) - struct timespec ts; - if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) return MakeTime(ts); - DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed"); -#else -#error Per-thread timing is not available on your system. -#endif -} - -namespace { - -std::string DateTimeString(bool local) { - typedef std::chrono::system_clock Clock; - std::time_t now = Clock::to_time_t(Clock::now()); - const std::size_t kStorageSize = 128; - char storage[kStorageSize]; - std::size_t written; - - if (local) { -#if defined(BENCHMARK_OS_WINDOWS) - written = - std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now)); -#else - std::tm timeinfo; - std::memset(&timeinfo, 0, sizeof(std::tm)); - ::localtime_r(&now, &timeinfo); - written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); -#endif - } else { -#if defined(BENCHMARK_OS_WINDOWS) - written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now)); -#else - std::tm timeinfo; - std::memset(&timeinfo, 0, sizeof(std::tm)); - ::gmtime_r(&now, &timeinfo); - written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); -#endif - } - CHECK(written < kStorageSize); - ((void)written); // prevent unused variable in optimized mode. - return std::string(storage); -} - -} // end namespace - -std::string LocalDateTimeString() { return DateTimeString(true); } - -} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/test/donotoptimize_test.cc @@ -1,52 +0,0 @@ -#include "benchmark/benchmark.h" - -#include - -namespace { -#if defined(__GNUC__) -std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); -#endif -std::uint64_t double_up(const std::uint64_t x) { return x * 2; } -} - -// Using DoNotOptimize on types like BitRef seem to cause a lot of problems -// with the inline assembly on both GCC and Clang. 
-struct BitRef { - int index; - unsigned char &byte; - -public: - static BitRef Make() { - static unsigned char arr[2] = {}; - BitRef b(1, arr[0]); - return b; - } -private: - BitRef(int i, unsigned char& b) : index(i), byte(b) {} -}; - -int main(int, char*[]) { - // this test verifies compilation of DoNotOptimize() for some types - - char buffer8[8]; - benchmark::DoNotOptimize(buffer8); - - char buffer20[20]; - benchmark::DoNotOptimize(buffer20); - - char buffer1024[1024]; - benchmark::DoNotOptimize(buffer1024); - benchmark::DoNotOptimize(&buffer1024[0]); - - int x = 123; - benchmark::DoNotOptimize(x); - benchmark::DoNotOptimize(&x); - benchmark::DoNotOptimize(x += 42); - - benchmark::DoNotOptimize(double_up(x)); - - // These tests are to e - benchmark::DoNotOptimize(BitRef::Make()); - BitRef lval = BitRef::Make(); - benchmark::DoNotOptimize(lval); -} Index: MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/test/output_test.h @@ -1,201 +0,0 @@ -#ifndef TEST_OUTPUT_TEST_H -#define TEST_OUTPUT_TEST_H - -#undef NDEBUG -#include <functional> -#include <initializer_list> -#include <memory> -#include <sstream> -#include <string> -#include <utility> -#include <vector> - -#include "../src/re.h" -#include "benchmark/benchmark.h" - -#define CONCAT2(x, y) x##y -#define CONCAT(x, y) CONCAT2(x, y) - -#define ADD_CASES(...) int CONCAT(dummy, __LINE__) = ::AddCases(__VA_ARGS__) - -#define SET_SUBSTITUTIONS(...) \ - int CONCAT(dummy, __LINE__) = ::SetSubstitutions(__VA_ARGS__) - -enum MatchRules { - MR_Default, // Skip non-matching lines until a match is found. - MR_Next, // Match must occur on the next line. - MR_Not // No line between the current position and the next match matches - // the regex -}; - -struct TestCase { - TestCase(std::string re, int rule = MR_Default); - - std::string regex_str; - int match_rule; - std::string substituted_regex; - std::shared_ptr<benchmark::Regex> regex; -}; - -enum TestCaseID { - TC_ConsoleOut, - TC_ConsoleErr, - TC_JSONOut, - TC_JSONErr, - TC_CSVOut, - TC_CSVErr, - - TC_NumID // PRIVATE -}; - -// Add a list of test cases to be run against the output specified by -// 'ID' -int AddCases(TestCaseID ID, std::initializer_list<TestCase> il); - -// Add or set a list of substitutions to be performed on constructed regexes -// See 'output_test_helper.cc' for a list of default substitutions. -int SetSubstitutions( - std::initializer_list<std::pair<std::string, std::string>> il); - -// Run all output tests. -void RunOutputTests(int argc, char* argv[]); - -// ========================================================================= // -// ------------------------- Results checking ------------------------------ // -// ========================================================================= // - -// Call this macro to register a benchmark for checking its results. This -// should be all that's needed. It subscribes a function to check the (CSV) -// results of a benchmark. This is done only after verifying that the output -// strings are really as expected. -// bm_name_pattern: a name or a regex pattern which will be matched against -// all the benchmark names.
Matching benchmarks -// will be the subject of a call to checker_function -// checker_function: should be of type ResultsCheckFn (see below) -#define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \ - size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) - -struct Results; -typedef std::function< void(Results const&) > ResultsCheckFn; - -size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); - -// Class holding the results of a benchmark. -// It is passed in calls to checker functions. -struct Results { - - // the benchmark name - std::string name; - // the benchmark fields - std::map< std::string, std::string > values; - - Results(const std::string& n) : name(n) {} - - int NumThreads() const; - - typedef enum { kCpuTime, kRealTime } BenchmarkTime; - - // get cpu_time or real_time in seconds - double GetTime(BenchmarkTime which) const; - - // get the real_time duration of the benchmark in seconds. - // it is better to use fuzzy float checks for this, as the float - // ASCII formatting is lossy. - double DurationRealTime() const { - return GetAs< double >("iterations") * GetTime(kRealTime); - } - // get the cpu_time duration of the benchmark in seconds - double DurationCPUTime() const { - return GetAs< double >("iterations") * GetTime(kCpuTime); - } - - // get the string for a result by name, or nullptr if the name - // is not found - const std::string* Get(const char* entry_name) const { - auto it = values.find(entry_name); - if(it == values.end()) return nullptr; - return &it->second; - } - - // get a result by name, parsed as a specific type. - // NOTE: for counters, use GetCounterAs instead. - template <class T> - T GetAs(const char* entry_name) const; - - // counters are written as doubles, so they have to be read first - // as a double, and only then converted to the asked type. - template <class T> - T GetCounterAs(const char* entry_name) const { - double dval = GetAs< double >(entry_name); - T tval = static_cast< T >(dval); - return tval; - } -}; - -template <class T> -T Results::GetAs(const char* entry_name) const { - auto *sv = Get(entry_name); - CHECK(sv != nullptr && !sv->empty()); - std::stringstream ss; - ss << *sv; - T out; - ss >> out; - CHECK(!ss.fail()); - return out; -} - -//---------------------------------- -// Macros to help in result checking. Do not use them with arguments causing -// side-effects. - -#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \ - CONCAT(CHECK_, relationship) \ - (entry.getfn< var_type >(var_name), (value)) << "\n" \ - << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ - << __FILE__ << ":" << __LINE__ << ": " \ - << "expected (" << #var_type << ")" << (var_name) \ - << "=" << (entry).getfn< var_type >(var_name) \ - << " to be " #relationship " to " << (value) << "\n" - -// check with tolerance. eps_factor is the tolerance window, which is -// interpreted relative to value (eg, 0.1 means 10% of value).
-#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ - CONCAT(CHECK_FLOAT_, relationship) \ - (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \ - << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ - << __FILE__ << ":" << __LINE__ << ": " \ - << "expected (" << #var_type << ")" << (var_name) \ - << "=" << (entry).getfn< var_type >(var_name) \ - << " to be " #relationship " to " << (value) << "\n" \ - << __FILE__ << ":" << __LINE__ << ": " \ - << "with tolerance of " << (eps_factor) * (value) \ - << " (" << (eps_factor)*100. << "%), " \ - << "but delta was " << ((entry).getfn< var_type >(var_name) - (value)) \ - << " (" << (((entry).getfn< var_type >(var_name) - (value)) \ - / \ - ((value) > 1.e-5 || value < -1.e-5 ? value : 1.e-5)*100.) \ - << "%)" - -#define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value) - -#define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value) - -#define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor) - -#define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) - -// ========================================================================= // -// --------------------------- Misc Utilities ------------------------------ // -// ========================================================================= // - -namespace { - -const char* const dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; - -} // end namespace - -#endif // TEST_OUTPUT_TEST_H Index: MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/test/output_test_helper.cc @@ -1,423 +0,0 @@ -#include <iostream> -#include <map> -#include <memory> -#include <sstream> -#include <string> - -#include "../src/check.h" // NOTE: check.h is for internal use only! -#include "../src/re.h" // NOTE: re.h is for internal use only -#include "output_test.h" -#include "../src/benchmark_api_internal.h" - -// ========================================================================= // -// ------------------------------ Internals -------------------------------- // -// ========================================================================= // -namespace internal { -namespace { - -using TestCaseList = std::vector<TestCase>; - -// Use a vector because the order elements are added matters during iteration. -// std::map/unordered_map don't guarantee that. -// For example: -// SetSubstitutions({{"%HelloWorld", "Hello"}, {"%Hello", "Hi"}}); -// Substitute("%HelloWorld") // Always expands to Hello. -using SubMap = std::vector<std::pair<std::string, std::string>>; - -TestCaseList& GetTestCaseList(TestCaseID ID) { - // Uses function-local statics to ensure initialization occurs - // before first use. - static TestCaseList lists[TC_NumID]; - return lists[ID]; -} - -SubMap& GetSubstitutions() { - // Don't use 'dec_re' from header because it may not yet be initialized.
- static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; - static SubMap map = { - {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, - // human-readable float - {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, - {"%int", "[ ]*[0-9]+"}, - {" %s ", "[ ]+"}, - {"%time", "[ ]*[0-9]{1,5} ns"}, - {"%console_report", "[ ]*[0-9]{1,5} ns [ ]*[0-9]{1,5} ns [ ]*[0-9]+"}, - {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"}, - {"%csv_header", - "name,iterations,real_time,cpu_time,time_unit,bytes_per_second," - "items_per_second,label,error_occurred,error_message"}, - {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"}, - {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"}, - {"%csv_bytes_report", - "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"}, - {"%csv_items_report", - "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,," + safe_dec_re + ",,,"}, - {"%csv_bytes_items_report", - "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + - "," + safe_dec_re + ",,,"}, - {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, - {"%csv_label_report_end", ",,"}}; - return map; -} - -std::string PerformSubstitutions(std::string source) { - SubMap const& subs = GetSubstitutions(); - using SizeT = std::string::size_type; - for (auto const& KV : subs) { - SizeT pos; - SizeT next_start = 0; - while ((pos = source.find(KV.first, next_start)) != std::string::npos) { - next_start = pos + KV.second.size(); - source.replace(pos, KV.first.size(), KV.second); - } - } - return source; -} - -void CheckCase(std::stringstream& remaining_output, TestCase const& TC, - TestCaseList const& not_checks) { - std::string first_line; - bool on_first = true; - std::string line; - while (remaining_output.eof() == false) { - CHECK(remaining_output.good()); - std::getline(remaining_output, line); - if (on_first) { - first_line = line; - on_first = false; - } - for (const auto& NC : not_checks) { - CHECK(!NC.regex->Match(line)) - << "Unexpected match for line \"" << line << "\" for MR_Not regex \"" - << NC.regex_str << "\"" - << "\n actual regex string \"" << TC.substituted_regex << "\"" - << "\n started matching near: " << first_line; - } - if (TC.regex->Match(line)) return; - CHECK(TC.match_rule != MR_Next) - << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str - << "\"" - << "\n actual regex string \"" << TC.substituted_regex << "\"" - << "\n started matching near: " << first_line; - } - CHECK(remaining_output.eof() == false) - << "End of output reached before match for regex \"" << TC.regex_str - << "\" was found" - << "\n actual regex string \"" << TC.substituted_regex << "\"" - << "\n started matching near: " << first_line; -} - -void CheckCases(TestCaseList const& checks, std::stringstream& output) { - std::vector<TestCase> not_checks; - for (size_t i = 0; i < checks.size(); ++i) { - const auto& TC = checks[i]; - if (TC.match_rule == MR_Not) { - not_checks.push_back(TC); - continue; - } - CheckCase(output, TC, not_checks); - not_checks.clear(); - } -} - -class TestReporter : public benchmark::BenchmarkReporter { - public: - TestReporter(std::vector<benchmark::BenchmarkReporter*> reps) - : reporters_(reps) {} - - virtual bool ReportContext(const Context& context) { - bool last_ret = false; - bool first = true; - for (auto rep : reporters_) { - bool new_ret = rep->ReportContext(context); - CHECK(first || new_ret == last_ret) - << "Reports return different values for ReportContext"; - first =
false; - last_ret = new_ret; - } - (void)first; - return last_ret; - } - - void ReportRuns(const std::vector<Run>& report) { - for (auto rep : reporters_) rep->ReportRuns(report); - } - void Finalize() { - for (auto rep : reporters_) rep->Finalize(); - } - - private: - std::vector<benchmark::BenchmarkReporter*> reporters_; -}; -} - -} // end namespace internal - -// ========================================================================= // -// -------------------------- Results checking ----------------------------- // -// ========================================================================= // - -namespace internal { - -// Utility class to manage subscribers for checking benchmark results. -// It works by parsing the CSV output to read the results. -class ResultsChecker { - public: - - struct PatternAndFn : public TestCase { // reusing TestCase for its regexes - PatternAndFn(const std::string& rx, ResultsCheckFn fn_) - : TestCase(rx), fn(fn_) {} - ResultsCheckFn fn; - }; - - std::vector< PatternAndFn > check_patterns; - std::vector< Results > results; - std::vector< std::string > field_names; - - void Add(const std::string& entry_pattern, ResultsCheckFn fn); - - void CheckResults(std::stringstream& output); - - private: - - void SetHeader_(const std::string& csv_header); - void SetValues_(const std::string& entry_csv_line); - - std::vector< std::string > SplitCsv_(const std::string& line); - -}; - -// store the static ResultsChecker in a function to prevent initialization -// order problems -ResultsChecker& GetResultsChecker() { - static ResultsChecker rc; - return rc; -} - -// add a results checker for a benchmark -void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { - check_patterns.emplace_back(entry_pattern, fn); -} - -// check the results of all subscribed benchmarks -void ResultsChecker::CheckResults(std::stringstream& output) { - // first reset the stream to the start - { - auto start = std::ios::streampos(0); - // clear before calling tellg() - output.clear(); - // seek to zero only when needed - if(output.tellg() > start) output.seekg(start); - // and just in case - output.clear(); - } - // now go over every line and publish it to the ResultsChecker - std::string line; - bool on_first = true; - while (output.eof() == false) { - CHECK(output.good()); - std::getline(output, line); - if (on_first) { - SetHeader_(line); // this is important - on_first = false; - continue; - } - SetValues_(line); - } - // finally we can call the subscribed check functions - for(const auto& p : check_patterns) { - VLOG(2) << "--------------------------------\n"; - VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; - for(const auto& r : results) { - if(!p.regex->Match(r.name)) { - VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; - continue; - } else { - VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; - } - VLOG(1) << "Checking results of " << r.name << ": ...
\n"; - p.fn(r); - VLOG(1) << "Checking results of " << r.name << ": OK.\n"; - } - } -} - -// prepare for the names in this header -void ResultsChecker::SetHeader_(const std::string& csv_header) { - field_names = SplitCsv_(csv_header); -} - -// set the values for a benchmark -void ResultsChecker::SetValues_(const std::string& entry_csv_line) { - if(entry_csv_line.empty()) return; // some lines are empty - CHECK(!field_names.empty()); - auto vals = SplitCsv_(entry_csv_line); - CHECK_EQ(vals.size(), field_names.size()); - results.emplace_back(vals[0]); // vals[0] is the benchmark name - auto &entry = results.back(); - for (size_t i = 1, e = vals.size(); i < e; ++i) { - entry.values[field_names[i]] = vals[i]; - } -} - -// a quick'n'dirty csv splitter (eliminating quotes) -std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) { - std::vector< std::string > out; - if(line.empty()) return out; - if(!field_names.empty()) out.reserve(field_names.size()); - size_t prev = 0, pos = line.find_first_of(','), curr = pos; - while(pos != line.npos) { - CHECK(curr > 0); - if(line[prev] == '"') ++prev; - if(line[curr-1] == '"') --curr; - out.push_back(line.substr(prev, curr-prev)); - prev = pos + 1; - pos = line.find_first_of(',', pos + 1); - curr = pos; - } - curr = line.size(); - if(line[prev] == '"') ++prev; - if(line[curr-1] == '"') --curr; - out.push_back(line.substr(prev, curr-prev)); - return out; -} - -} // end namespace internal - -size_t AddChecker(const char* bm_name, ResultsCheckFn fn) -{ - auto &rc = internal::GetResultsChecker(); - rc.Add(bm_name, fn); - return rc.results.size(); -} - -int Results::NumThreads() const { - auto pos = name.find("/threads:"); - if(pos == name.npos) return 1; - auto end = name.find('/', pos + 9); - std::stringstream ss; - ss << name.substr(pos + 9, end); - int num = 1; - ss >> num; - CHECK(!ss.fail()); - return num; -} - -double Results::GetTime(BenchmarkTime which) const { - CHECK(which == kCpuTime || which == kRealTime); - const char *which_str = which == kCpuTime ? 
"cpu_time" : "real_time"; - double val = GetAs< double >(which_str); - auto unit = Get("time_unit"); - CHECK(unit); - if(*unit == "ns") { - return val * 1.e-9; - } else if(*unit == "us") { - return val * 1.e-6; - } else if(*unit == "ms") { - return val * 1.e-3; - } else if(*unit == "s") { - return val; - } else { - CHECK(1 == 0) << "unknown time unit: " << *unit; - return 0; - } -} - -// ========================================================================= // -// -------------------------- Public API Definitions------------------------ // -// ========================================================================= // - -TestCase::TestCase(std::string re, int rule) - : regex_str(std::move(re)), - match_rule(rule), - substituted_regex(internal::PerformSubstitutions(regex_str)), - regex(std::make_shared()) { - std::string err_str; - regex->Init(substituted_regex,& err_str); - CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex - << "\"" - << "\n originally \"" << regex_str << "\"" - << "\n got error: " << err_str; -} - -int AddCases(TestCaseID ID, std::initializer_list il) { - auto& L = internal::GetTestCaseList(ID); - L.insert(L.end(), il); - return 0; -} - -int SetSubstitutions( - std::initializer_list> il) { - auto& subs = internal::GetSubstitutions(); - for (auto KV : il) { - bool exists = false; - KV.second = internal::PerformSubstitutions(KV.second); - for (auto& EKV : subs) { - if (EKV.first == KV.first) { - EKV.second = std::move(KV.second); - exists = true; - break; - } - } - if (!exists) subs.push_back(std::move(KV)); - } - return 0; -} - -void RunOutputTests(int argc, char* argv[]) { - using internal::GetTestCaseList; - benchmark::Initialize(&argc, argv); - auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/true); - benchmark::ConsoleReporter CR(options); - benchmark::JSONReporter JR; - benchmark::CSVReporter CSVR; - struct ReporterTest { - const char* name; - std::vector& output_cases; - std::vector& error_cases; - benchmark::BenchmarkReporter& reporter; - std::stringstream out_stream; - std::stringstream err_stream; - - ReporterTest(const char* n, std::vector& out_tc, - std::vector& err_tc, - benchmark::BenchmarkReporter& br) - : name(n), output_cases(out_tc), error_cases(err_tc), reporter(br) { - reporter.SetOutputStream(&out_stream); - reporter.SetErrorStream(&err_stream); - } - } TestCases[] = { - {"ConsoleReporter", GetTestCaseList(TC_ConsoleOut), - GetTestCaseList(TC_ConsoleErr), CR}, - {"JSONReporter", GetTestCaseList(TC_JSONOut), GetTestCaseList(TC_JSONErr), - JR}, - {"CSVReporter", GetTestCaseList(TC_CSVOut), GetTestCaseList(TC_CSVErr), - CSVR}, - }; - - // Create the test reporter and run the benchmarks. - std::cout << "Running benchmarks...\n"; - internal::TestReporter test_rep({&CR, &JR, &CSVR}); - benchmark::RunSpecifiedBenchmarks(&test_rep); - - for (auto& rep_test : TestCases) { - std::string msg = std::string("\nTesting ") + rep_test.name + " Output\n"; - std::string banner(msg.size() - 1, '-'); - std::cout << banner << msg << banner << "\n"; - - std::cerr << rep_test.err_stream.str(); - std::cout << rep_test.out_stream.str(); - - internal::CheckCases(rep_test.error_cases, rep_test.err_stream); - internal::CheckCases(rep_test.output_cases, rep_test.out_stream); - - std::cout << "\n"; - } - - // now that we know the output is as expected, we can dispatch - // the checks to subscribees. 
- auto &csv = TestCases[2]; - // would use == but gcc spits a warning - CHECK(std::strcmp(csv.name, "CSVReporter") == 0); - internal::GetResultsChecker().CheckResults(csv.out_stream); -} Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/__init__.py =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/__init__.py @@ -1,8 +0,0 @@ -"""Google Benchmark tooling""" - -__author__ = 'Eric Fiselier' -__email__ = 'eric@efcs.ca' -__versioninfo__ = (0, 5, 0) -__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev' - -__all__ = [] Index: MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.2.0/tools/gbench/util.py @@ -1,159 +0,0 @@ -"""util.py - General utilities for running, loading, and processing benchmarks -""" -import json -import os -import tempfile -import subprocess -import sys - -# Input file type enumeration -IT_Invalid = 0 -IT_JSON = 1 -IT_Executable = 2 - -_num_magic_bytes = 2 if sys.platform.startswith('win') else 4 -def is_executable_file(filename): - """ - Return 'True' if 'filename' names a valid file which is likely - an executable. A file is considered an executable if it starts with the - magic bytes for an EXE, Mach O, or ELF file. - """ - if not os.path.isfile(filename): - return False - with open(filename, mode='rb') as f: - magic_bytes = f.read(_num_magic_bytes) - if sys.platform == 'darwin': - return magic_bytes in [ - b'\xfe\xed\xfa\xce', # MH_MAGIC - b'\xce\xfa\xed\xfe', # MH_CIGAM - b'\xfe\xed\xfa\xcf', # MH_MAGIC_64 - b'\xcf\xfa\xed\xfe', # MH_CIGAM_64 - b'\xca\xfe\xba\xbe', # FAT_MAGIC - b'\xbe\xba\xfe\xca' # FAT_CIGAM - ] - elif sys.platform.startswith('win'): - return magic_bytes == b'MZ' - else: - return magic_bytes == b'\x7FELF' - - -def is_json_file(filename): - """ - Returns 'True' if 'filename' names a valid JSON output file. - 'False' otherwise. - """ - try: - with open(filename, 'r') as f: - json.load(f) - return True - except: - pass - return False - - -def classify_input_file(filename): - """ - Return a tuple (type, msg) where 'type' specifies the classified type - of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable - string representing the error. - """ - ftype = IT_Invalid - err_msg = None - if not os.path.exists(filename): - err_msg = "'%s' does not exist" % filename - elif not os.path.isfile(filename): - err_msg = "'%s' does not name a file" % filename - elif is_executable_file(filename): - ftype = IT_Executable - elif is_json_file(filename): - ftype = IT_JSON - else: - err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename - return ftype, err_msg - - -def check_input_file(filename): - """ - Classify the file named by 'filename' and return the classification. - If the file is classified as 'IT_Invalid' print an error message and exit - the program. - """ - ftype, msg = classify_input_file(filename) - if ftype == IT_Invalid: - print("Invalid input file: %s" % msg) - sys.exit(1) - return ftype - -def find_benchmark_flag(prefix, benchmark_flags): - """ - Search the specified list of flags for a flag matching `<prefix>` and - if it is found return the arg it specifies. If specified more than once the - last value is returned. If the flag is not found None is returned.
- """ - assert prefix.startswith('--') and prefix.endswith('=') - result = None - for f in benchmark_flags: - if f.startswith(prefix): - result = f[len(prefix):] - return result - -def remove_benchmark_flags(prefix, benchmark_flags): - """ - Return a new list containing the specified benchmark_flags except those - with the specified prefix. - """ - assert prefix.startswith('--') and prefix.endswith('=') - return [f for f in benchmark_flags if not f.startswith(prefix)] - -def load_benchmark_results(fname): - """ - Read benchmark output from a file and return the JSON object. - REQUIRES: 'fname' names a file containing JSON benchmark output. - """ - with open(fname, 'r') as f: - return json.load(f) - - -def run_benchmark(exe_name, benchmark_flags): - """ - Run a benchmark specified by 'exe_name' with the specified - 'benchmark_flags'. The benchmark is run directly as a subprocess to preserve - real time console output. - RETURNS: A JSON object representing the benchmark output - """ - output_name = find_benchmark_flag('--benchmark_out=', - benchmark_flags) - is_temp_output = False - if output_name is None: - is_temp_output = True - thandle, output_name = tempfile.mkstemp() - os.close(thandle) - benchmark_flags = list(benchmark_flags) + \ - ['--benchmark_out=%s' % output_name] - - cmd = [exe_name] + benchmark_flags - print("RUNNING: %s" % ' '.join(cmd)) - exitCode = subprocess.call(cmd) - if exitCode != 0: - print('TEST FAILED...') - sys.exit(exitCode) - json_res = load_benchmark_results(output_name) - if is_temp_output: - os.unlink(output_name) - return json_res - - -def run_or_load_benchmark(filename, benchmark_flags): - """ - Get the results for a specified benchmark. If 'filename' specifies - an executable benchmark then the results are generated by running the - benchmark. Otherwise 'filename' must name a valid JSON output file, - which is loaded and the result returned. - """ - ftype = check_input_file(filename) - if ftype == IT_JSON: - return load_benchmark_results(filename) - elif ftype == IT_Executable: - return run_benchmark(filename, benchmark_flags) - else: - assert False # This branch is unreachable \ No newline at end of file Index: MicroBenchmarks/libs/benchmark-1.3.0/AUTHORS =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/AUTHORS +++ MicroBenchmarks/libs/benchmark-1.3.0/AUTHORS @@ -10,9 +10,11 @@ Albert Pretorius Arne Beer +Carto Christopher Seymour David Coeurjolly -Dominic Hamon +Dirac Research +Dominik Czarnota Eric Fiselier Eugene Zhuk Evgeny Safronov @@ -21,8 +23,8 @@ International Business Machines Corporation Ismael Jimenez Martinez Jern-Kuan Leong -Joao Paulo Magalhaes JianXiong Zhou +Joao Paulo Magalhaes Jussi Knuuttila Kaito Udagawa Lei Xu @@ -32,9 +34,8 @@ Oleksandr Sochka Paul Redmond Radoslav Yovchev +Roman Lebedev Shuo Chen Yixuan Qiu Yusuke Suzuki -Dirac Research Zbigniew Skowron -Dominik Czarnota Index: MicroBenchmarks/libs/benchmark-1.3.0/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/CMakeLists.txt +++ MicroBenchmarks/libs/benchmark-1.3.0/CMakeLists.txt @@ -15,7 +15,8 @@ option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON) option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF) option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." 
OFF) -option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library" OFF) +option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF) +option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON) # Make sure we can import our CMake functions list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") @@ -135,24 +136,21 @@ endif() # Coverage build type - set(CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING - "Flags used by the C++ compiler during coverage builds." + set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}" + CACHE STRING "Flags used by the C++ compiler during coverage builds." FORCE) - set(CMAKE_EXE_LINKER_FLAGS_COVERAGE - "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" CACHE STRING - "Flags used for linking binaries during coverage builds." + set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" + CACHE STRING "Flags used for linking binaries during coverage builds." FORCE) - set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE - "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" CACHE STRING - "Flags used by the shared libraries linker during coverage builds." + set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" + CACHE STRING "Flags used by the shared libraries linker during coverage builds." FORCE) mark_as_advanced( - CMAKE_CXX_FLAGS_COVERAGE - CMAKE_EXE_LINKER_FLAGS_COVERAGE - CMAKE_SHARED_LINKER_FLAGS_COVERAGE) + BENCHMARK_CXX_FLAGS_COVERAGE + BENCHMARK_EXE_LINKER_FLAGS_COVERAGE + BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE) set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING - "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage." - FORCE) + "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.") add_cxx_compiler_flag(--coverage COVERAGE) endif() Index: MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTORS =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTORS +++ MicroBenchmarks/libs/benchmark-1.3.0/CONTRIBUTORS @@ -28,18 +28,19 @@ Chris Kennelly Christopher Seymour David Coeurjolly -Dominic Hamon +Dominic Hamon +Dominik Czarnota Eric Fiselier Eugene Zhuk Evgeny Safronov Felix Homann Ismael Jimenez Martinez Jern-Kuan Leong -Joao Paulo Magalhaes JianXiong Zhou +Joao Paulo Magalhaes Jussi Knuuttila -Kaito Udagawa Kai Wolf +Kaito Udagawa Lei Xu Matt Clarkson Maxim Vafin @@ -49,11 +50,12 @@ Paul Redmond Pierre Phaneuf Radoslav Yovchev +Raul Marin Ray Glover +Roman Lebedev Shuo Chen +Tobias Ulvgård Tom Madams Yixuan Qiu Yusuke Suzuki -Tobias Ulvgård Zbigniew Skowron -Dominik Czarnota Index: MicroBenchmarks/libs/benchmark-1.3.0/README.md =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/README.md +++ MicroBenchmarks/libs/benchmark-1.3.0/README.md @@ -18,8 +18,10 @@ Define a function that executes the code to be measured. ```c++ +#include <benchmark/benchmark.h> + static void BM_StringCreation(benchmark::State& state) { - while (state.KeepRunning()) + for (auto _ : state) std::string empty_string; } // Register the function as a benchmark BENCHMARK(BM_StringCreation); @@ -28,7 +30,7 @@ // Define another benchmark static void BM_StringCopy(benchmark::State& state) { std::string x = "hello"; - while (state.KeepRunning()) + for (auto _ : state) std::string copy(x); } BENCHMARK(BM_StringCopy); @@ -36,6 +38,8 @@ BENCHMARK_MAIN(); ``` +Don't forget to tell your linker to link against the benchmark library, e.g.
via the `-lbenchmark` linker flag. + ### Passing arguments Sometimes a family of benchmarks can be implemented with just one routine that takes an extra argument to specify which one of the family of benchmarks to @@ -47,7 +51,7 @@ char* src = new char[state.range(0)]; char* dst = new char[state.range(0)]; memset(src, 'x', state.range(0)); - while (state.KeepRunning()) + for (auto _ : state) memcpy(dst, src, state.range(0)); state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(state.range(0))); @@ -80,22 +84,23 @@ ```c++ static void BM_SetInsert(benchmark::State& state) { - while (state.KeepRunning()) { + std::set<int> data; + for (auto _ : state) { state.PauseTiming(); - std::set<int> data = ConstructRandomSet(state.range(0)); + data = ConstructRandomSet(state.range(0)); state.ResumeTiming(); for (int j = 0; j < state.range(1); ++j) data.insert(RandomNumber()); } } BENCHMARK(BM_SetInsert) - ->Args({1<<10, 1}) - ->Args({1<<10, 8}) - ->Args({1<<10, 64}) + ->Args({1<<10, 128}) + ->Args({2<<10, 128}) + ->Args({4<<10, 128}) + ->Args({8<<10, 128}) ->Args({1<<10, 512}) - ->Args({8<<10, 1}) - ->Args({8<<10, 8}) - ->Args({8<<10, 64}) + ->Args({2<<10, 512}) + ->Args({4<<10, 512}) ->Args({8<<10, 512}); ``` @@ -105,7 +110,7 @@ pair. ```c++ -BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}}); +BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}}); ``` For more complex patterns of inputs, passing a custom function to `Apply` allows @@ -131,7 +136,7 @@ static void BM_StringCompare(benchmark::State& state) { std::string s1(state.range(0), '-'); std::string s2(state.range(0), '-'); - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(s1.compare(s2)); } state.SetComplexityN(state.range(0)); @@ -165,7 +170,7 @@ template <class Q> int BM_Sequential(benchmark::State& state) { Q q; typename Q::value_type v; - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = state.range(0); i--; ) q.push(v); for (int e = state.range(0); e--; ) @@ -181,7 +186,7 @@ Three macros are provided for adding benchmark templates. ```c++ -#if __cplusplus >= 201103L // C++11 and greater. +#ifdef BENCHMARK_HAS_CXX11 #define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters. #else // C++ < C++11 #define BENCHMARK_TEMPLATE(func, arg1) @@ -190,6 +195,62 @@ #define BENCHMARK_TEMPLATE2(func, arg1, arg2) ``` +### A Faster KeepRunning loop + +In C++11 mode, a range-based for loop should be used in preference to +the `KeepRunning` loop for running the benchmarks. For example: + +```c++ +static void BM_Fast(benchmark::State &state) { + for (auto _ : state) { + FastOperation(); + } +} +BENCHMARK(BM_Fast); +``` + +The range-based for loop is faster than using `KeepRunning` because +`KeepRunning` requires a memory load and store of the iteration count +every iteration, whereas the range-based variant is able to keep the iteration count +in a register.
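To make the migration concrete, here is a minimal sketch of the same trivial benchmark written in both loop styles; the empty `DoNotOptimize` body is illustrative only, and both APIs are present in benchmark 1.3.0:

```c++
#include <benchmark/benchmark.h>

// Legacy C++03-compatible style: the iteration count is tracked by
// KeepRunning(), costing a load/store of the counter each iteration.
static void BM_Legacy(benchmark::State& state) {
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(0);
  }
}
BENCHMARK(BM_Legacy);

// Preferred C++11 style: the iteration count can stay in a register.
static void BM_Ranged(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(0);
  }
}
BENCHMARK(BM_Ranged);

BENCHMARK_MAIN();
```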
+ +For example, an empty inner loop of using the range-based for method looks like: + +```asm +# Loop Init + mov rbx, qword ptr [r14 + 104] + call benchmark::State::StartKeepRunning() + test rbx, rbx + je .LoopEnd +.LoopHeader: # =>This Inner Loop Header: Depth=1 + add rbx, -1 + jne .LoopHeader +.LoopEnd: +``` + +Compared to an empty `KeepRunning` loop, which looks like: + +```asm +.LoopHeader: # in Loop: Header=BB0_3 Depth=1 + cmp byte ptr [rbx], 1 + jne .LoopInit +.LoopBody: # =>This Inner Loop Header: Depth=1 + mov rax, qword ptr [rbx + 8] + lea rcx, [rax + 1] + mov qword ptr [rbx + 8], rcx + cmp rax, qword ptr [rbx + 104] + jb .LoopHeader + jmp .LoopEnd +.LoopInit: + mov rdi, rbx + call benchmark::State::StartKeepRunning() + jmp .LoopBody +.LoopEnd: +``` + +Unless C++03 compatibility is required, the range-based variant of writing +the benchmark loop should be preferred. + ## Passing arbitrary arguments to a benchmark In C++11 it is possible to define a benchmark that takes an arbitrary number of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)` @@ -199,11 +260,11 @@ should describe the values passed. ```c++ -template <class ...ExtraArgs>` +template <class ...ExtraArgs> void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) { [...] } -// Registers a benchmark named "BM_takes_args/int_string_test` that passes +// Registers a benchmark named "BM_takes_args/int_string_test" that passes // the specified values to `extra_args`. BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); ``` @@ -223,8 +284,7 @@ benchmark tests to be registered programmatically. Additionally `RegisterBenchmark` allows any callable object to be registered -as a benchmark. Including capturing lambdas and function objects. This -allows the creation +as a benchmark, including capturing lambdas and function objects. For example: ```c++ @@ -240,9 +300,10 @@ ### Multithreaded benchmarks In a multithreaded test (benchmark invoked by multiple threads simultaneously), -it is guaranteed that none of the threads will start until all have called -`KeepRunning`, and all will have finished before KeepRunning returns false. As -such, any global setup or teardown can be wrapped in a check against the thread +it is guaranteed that none of the threads will start until all have reached +the start of the benchmark loop, and all will have finished before any thread +exits the benchmark loop. (This behavior is also provided by the `KeepRunning()` +API.) As such, any global setup or teardown can be wrapped in a check against the thread index: ```c++ @@ -250,7 +311,7 @@ if (state.thread_index == 0) { // Setup code here. } - while (state.KeepRunning()) { + for (auto _ : state) { // Run the test as normal. } if (state.thread_index == 0) { @@ -274,10 +335,10 @@ ## Manual timing For benchmarking something for which neither CPU time nor real-time are correct or accurate enough, completely manual timing is supported using -the `UseManualTime` function. +the `UseManualTime` function. When `UseManualTime` is used, the benchmarked code must call -`SetIterationTime` once per iteration of the `KeepRunning` loop to +`SetIterationTime` once per iteration of the benchmark loop to report the manually measured time. An example use case for this is benchmarking GPU execution (e.g.
OpenCL @@ -293,7 +354,7 @@ static_cast<double>(microseconds) }; - while (state.KeepRunning()) { + for (auto _ : state) { auto start = std::chrono::high_resolution_clock::now(); // Simulate some useful workload with a sleep std::this_thread::sleep_for(sleep_duration); @@ -316,7 +377,7 @@ ```c++ static void BM_test(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { int x = 0; for (int i=0; i < 64; ++i) { benchmark::DoNotOptimize(x += i); @@ -355,7 +416,7 @@ ```c++ static void BM_vector_push_back(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { std::vector<int> v; v.reserve(1); benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered. @@ -384,7 +445,7 @@ set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on the registered benchmark object. -## Reporting the mean and standard devation by repeated benchmarks +## Reporting the mean, median and standard deviation by repeated benchmarks By default each benchmark is run once and that single result is reported. However benchmarks are often noisy and a single result may not be representative of the overall behavior. For this reason it's possible to repeatedly rerun the @@ -392,19 +453,42 @@ The number of runs of each benchmark is specified globally by the `--benchmark_repetitions` flag or on a per benchmark basis by calling -`Repetitions` on the registered benchmark object. When a benchmark is run -more than once the mean and standard deviation of the runs will be reported. +`Repetitions` on the registered benchmark object. When a benchmark is run more +than once the mean, median and standard deviation of the runs will be reported. Additionally the `--benchmark_report_aggregates_only={true|false}` flag or `ReportAggregatesOnly(bool)` function can be used to change how repeated tests are reported. By default the result of each repeated run is reported. When this -option is 'true' only the mean and standard deviation of the runs is reported. +option is `true` only the mean, median and standard deviation of the runs is reported. Calling `ReportAggregatesOnly(bool)` on a registered benchmark object overrides the value of the flag for that benchmark. +## User-defined statistics for repeated benchmarks +While having mean, median and standard deviation is nice, this may not be +enough for everyone. For example you may want to know what the largest +observation is, e.g. because you have some real-time constraints. This is easy. +The following code will specify a custom statistic to be calculated, defined +by a lambda function. + +```c++ +void BM_spin_empty(benchmark::State& state) { + for (auto _ : state) { + for (int x = 0; x < state.range(0); ++x) { + benchmark::DoNotOptimize(x); + } + } +} + +BENCHMARK(BM_spin_empty) + ->ComputeStatistics("max", [](const std::vector<double>& v) -> double { + return *(std::max_element(std::begin(v), std::end(v))); + }) + ->Arg(512); +``` + ## Fixtures Fixture tests are created by -first defining a type that derives from ::benchmark::Fixture and then +first defining a type that derives from `::benchmark::Fixture` and then creating/registering the tests using the following macros: * `BENCHMARK_F(ClassName, Method)` @@ -417,13 +501,13 @@ class MyFixture : public benchmark::Fixture {}; BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { ... } } BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { ...
} } @@ -432,6 +516,31 @@ /* BarTest is now registered */ ``` +### Templated fixtures +You can also create templated fixtures by using the following macros: + +* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)` +* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)` + +For example: +```c++ +template <typename T> +class MyFixture : public benchmark::Fixture {}; + +BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) { + for (auto _ : st) { + ... + } +} + +BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) { + for (auto _ : st) { + ... + } +} + +BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2); +``` ## User-defined counters @@ -441,7 +550,7 @@ ```c++ static void UserCountersExample1(benchmark::State& state) { double numFoos = 0, numBars = 0, numBazs = 0; - while (state.KeepRunning()) { + for (auto _ : state) { // ... count Foo,Bar,Baz events } state.counters["Foo"] = numFoos; @@ -564,11 +673,12 @@ communication, occur within a benchmark the `State::SkipWithError(const char* msg)` function can be used to skip that run of benchmark and report the error. Note that only future iterations of the -`KeepRunning()` are skipped. Users may explicitly return to exit the -benchmark immediately. +`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop, +users must explicitly exit the loop; otherwise all further iterations will be performed. +Users may explicitly return to exit the benchmark immediately. The `SkipWithError(...)` function may be used at any point within the benchmark, -including before and after the `KeepRunning()` loop. +including before and after the benchmark loop. For example: ```c++ @@ -579,7 +689,7 @@ state.SkipWithError("Resource is not good!"); // KeepRunning() loop will not be entered. } - while (state.KeepRunning()) { + for (auto _ : state) { auto data = resource.read_data(); if (!resource.good()) { state.SkipWithError("Failed to read data!"); @@ -588,6 +698,14 @@ do_stuff(data); } } + +static void BM_test_ranged_for(benchmark::State & state) { + state.SkipWithError("test will not be entered"); + for (auto _ : state) { + state.SkipWithError("Failed!"); + break; // REQUIRED to prevent all further iterations. + } +} ``` ## Running a subset of the benchmarks @@ -614,7 +732,7 @@ is the default format. The Console format is intended to be a human readable format. By default -the format generates color output. Context is output on stderr and the +the format generates color output. Context is output on stderr and the tabular data on stdout. Example tabular output looks like: ``` Benchmark Time(ns) CPU(ns) Iterations @@ -717,6 +835,18 @@ Note: Using the library and its headers in C++03 is supported. C++11 is only required to build the library. +## Disable CPU frequency scaling +If you see this error: +``` +***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
+``` +you might want to disable the CPU frequency scaling while running the benchmark: +```bash +sudo cpupower frequency-set --governor performance +./mybench +sudo cpupower frequency-set --governor powersave +``` + # Known Issues ### Windows Index: MicroBenchmarks/libs/benchmark-1.3.0/cmake/AddCXXCompilerFlag.cmake =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/cmake/AddCXXCompilerFlag.cmake +++ MicroBenchmarks/libs/benchmark-1.3.0/cmake/AddCXXCompilerFlag.cmake @@ -38,7 +38,7 @@ if(ARGV1) string(TOUPPER "_${VARIANT}" VARIANT) endif() - set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) + set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) endif() endfunction() Index: MicroBenchmarks/libs/benchmark-1.3.0/cmake/CXXFeatureCheck.cmake =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/cmake/CXXFeatureCheck.cmake +++ MicroBenchmarks/libs/benchmark-1.3.0/cmake/CXXFeatureCheck.cmake @@ -22,18 +22,35 @@ string(TOUPPER ${FILE} VAR) string(TOUPPER "HAVE_${VAR}" FEATURE) if (DEFINED HAVE_${VAR}) - set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE) + set(HAVE_${VAR} 1 PARENT_SCOPE) add_definitions(-DHAVE_${VAR}) return() endif() + message("-- Performing Test ${FEATURE}") - try_run(RUN_${FEATURE} COMPILE_${FEATURE} - ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp - CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + if(CMAKE_CROSSCOMPILING) + try_compile(COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + if(COMPILE_${FEATURE}) + message(WARNING + "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") + set(RUN_${FEATURE} 0) + else() + set(RUN_${FEATURE} 1) + endif() + else() + message("-- Performing Test ${FEATURE}") + try_run(RUN_${FEATURE} COMPILE_${FEATURE} + ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp + CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + endif() + if(RUN_${FEATURE} EQUAL 0) message("-- Performing Test ${FEATURE} -- success") - set(HAVE_${VAR} 1 CACHE INTERNAL "Feature test for ${FILE}" PARENT_SCOPE) + set(HAVE_${VAR} 1 PARENT_SCOPE) add_definitions(-DHAVE_${VAR}) else() if(NOT COMPILE_${FEATURE}) @@ -43,4 +60,3 @@ endif() endif() endfunction() - Index: MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark.h +++ MicroBenchmarks/libs/benchmark-1.3.0/include/benchmark/benchmark.h @@ -18,7 +18,7 @@ // Define a function that executes the code to be measured a // specified number of times: static void BM_StringCreation(benchmark::State& state) { - while (state.KeepRunning()) + for (auto _ : state) std::string empty_string; } @@ -28,7 +28,7 @@ // Define another benchmark static void BM_StringCopy(benchmark::State& state) { std::string x = "hello"; - while (state.KeepRunning()) + for (auto _ : state) std::string copy(x); } BENCHMARK(BM_StringCopy); @@ -54,7 +54,7 @@ static void BM_memcpy(benchmark::State& state) { char* src = new char[state.range(0)]; char* dst = new char[state.range(0)]; memset(src, 'x', state.range(0)); - while 
(state.KeepRunning()) + for (auto _ : state) memcpy(dst, src, state.range(0)); state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(state.range(0))); @@ -72,29 +72,30 @@ // example, the following code defines a family of microbenchmarks for // measuring the speed of set insertion. static void BM_SetInsert(benchmark::State& state) { - while (state.KeepRunning()) { + set<int> data; + for (auto _ : state) { state.PauseTiming(); - set<int> data = ConstructRandomSet(state.range(0)); + data = ConstructRandomSet(state.range(0)); state.ResumeTiming(); for (int j = 0; j < state.range(1); ++j) data.insert(RandomNumber()); } } BENCHMARK(BM_SetInsert) - ->Args({1<<10, 1}) - ->Args({1<<10, 8}) - ->Args({1<<10, 64}) + ->Args({1<<10, 128}) + ->Args({2<<10, 128}) + ->Args({4<<10, 128}) + ->Args({8<<10, 128}) ->Args({1<<10, 512}) - ->Args({8<<10, 1}) - ->Args({8<<10, 8}) - ->Args({8<<10, 64}) + ->Args({2<<10, 512}) + ->Args({4<<10, 512}) ->Args({8<<10, 512}); // The preceding code is quite repetitive, and can be replaced with // the following short-hand. The following macro will pick a few // appropriate arguments in the product of the two specified ranges // and will generate a microbenchmark for each such pair. -BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {1, 512}}); +BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}}); // For more complex patterns of inputs, passing a custom function // to Apply allows programmatic specification of an @@ -114,7 +115,7 @@ template <class Q> int BM_Sequential(benchmark::State& state) { Q q; typename Q::value_type v; - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = state.range(0); i--; ) q.push(v); for (int e = state.range(0); e--; ) @@ -135,15 +136,15 @@ BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds. In a multithreaded test, it is guaranteed that none of the threads will start -until all have called KeepRunning, and all will have finished before KeepRunning -returns false. As such, any global setup or teardown you want to do can be -wrapped in a check against the thread index: +until all have reached the loop start, and all will have finished before any +thread exits the loop body. As such, any global setup or teardown you want to +do can be wrapped in a check against the thread index: static void BM_MultiThreaded(benchmark::State& state) { if (state.thread_index == 0) { // Setup code here. } - while (state.KeepRunning()) { + for (auto _ : state) { // Run the test as normal. } if (state.thread_index == 0) { @@ -164,7 +165,8 @@ #define BENCHMARK_BENCHMARK_H_ -#if __cplusplus >= 201103L +// The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. +#if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) #define BENCHMARK_HAS_CXX11 #endif @@ -237,7 +239,6 @@ #define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) #endif - namespace benchmark { class BenchmarkReporter; @@ -289,10 +290,12 @@ } // namespace internal -#if !defined(__GNUC__) || defined(__pnacl__) || defined(EMSCRIPTN) +#if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \ + defined(EMSCRIPTN) # define BENCHMARK_HAS_NO_INLINE_ASSEMBLY #endif + // The DoNotOptimize(...) function can be used to prevent a value or // expression from being optimized away by the compiler. This function is // intended to add little to no overhead. @@ -378,6 +381,18 @@ // computational complexity for the benchmark.
typedef double(BigOFunc)(int); +// StatisticsFunc is passed to a benchmark in order to compute some descriptive +// statistics over all the measurements of some type +typedef double(StatisticsFunc)(const std::vector&); + +struct Statistics { + std::string name_; + StatisticsFunc* compute_; + + Statistics(std::string name, StatisticsFunc* compute) + : name_(name), compute_(compute) {} +}; + namespace internal { class ThreadTimer; class ThreadManager; @@ -398,6 +413,19 @@ // benchmark to use. class State { public: + struct StateIterator; + friend struct StateIterator; + + // Returns iterators used to run each iteration of a benchmark using a + // C++11 ranged-based for loop. These functions should not be called directly. + // + // REQUIRES: The benchmark has not started running yet. Neither begin nor end + // have been called previously. + // + // NOTE: KeepRunning may not be used after calling either of these functions. + BENCHMARK_ALWAYS_INLINE StateIterator begin(); + BENCHMARK_ALWAYS_INLINE StateIterator end(); + // Returns true if the benchmark should continue through another iteration. // NOTE: A benchmark may not return from the test until KeepRunning() has // returned false. @@ -405,7 +433,7 @@ if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { StartKeepRunning(); } - bool const res = total_iterations_++ < max_iterations; + bool const res = --total_iterations_; if (BENCHMARK_BUILTIN_EXPECT(!res, false)) { FinishKeepRunning(); } @@ -415,7 +443,7 @@ // REQUIRES: timer is running and 'SkipWithError(...)' has not been called // by the current thread. // Stop the benchmark timer. If not called, the timer will be - // automatically stopped after KeepRunning() returns false for the first time. + // automatically stopped after the last iteration of the benchmark loop. // // For threaded benchmarks the PauseTiming() function only pauses the timing // for the current thread. @@ -431,7 +459,8 @@ // REQUIRES: timer is not running and 'SkipWithError(...)' has not been called // by the current thread. // Start the benchmark timer. The timer is NOT running on entrance to the - // benchmark function. It begins running after the first call to KeepRunning() + // benchmark function. It begins running after control flow enters the + // benchmark loop. // // NOTE: PauseTiming()/ResumeTiming() are relatively // heavyweight, and so their use should generally be avoided @@ -440,9 +469,13 @@ // REQUIRES: 'SkipWithError(...)' has not been called previously by the // current thread. - // Skip any future iterations of the 'KeepRunning()' loop in the current - // thread and report an error with the specified 'msg'. After this call - // the user may explicitly 'return' from the benchmark. + // Report the benchmark as resulting in an error with the specified 'msg'. + // After this call the user may explicitly 'return' from the benchmark. + // + // If the ranged-for style of benchmark loop is used, the user must explicitly + // break from the loop, otherwise all future iterations will be run. + // If the 'KeepRunning()' loop is used the current thread will automatically + // exit the loop at the end of the current iteration. // // For threaded benchmarks only the current thread stops executing and future // calls to `KeepRunning()` will block until all threads have completed @@ -455,7 +488,7 @@ // responsibility to exit the scope as needed. void SkipWithError(const char* msg); - // REQUIRES: called exactly once per iteration of the KeepRunning loop. 
+ // REQUIRES: called exactly once per iteration of the benchmarking loop. // Set the manually measured time for this benchmark iteration, which // is used instead of automatically measured time if UseManualTime() was // specified. @@ -470,7 +503,7 @@ // value > 0, the report is printed in MB/sec instead of nanoseconds // per iteration. // - // REQUIRES: a benchmark has exited its KeepRunning loop. + // REQUIRES: a benchmark has exited its benchmarking loop. BENCHMARK_ALWAYS_INLINE void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; } @@ -493,7 +526,7 @@ // executing benchmark. It is typically called at the end of a processing // benchmark where a processing items/second output is desired. // - // REQUIRES: a benchmark has exited its KeepRunning loop. + // REQUIRES: a benchmark has exited its benchmarking loop. BENCHMARK_ALWAYS_INLINE void SetItemsProcessed(size_t items) { items_processed_ = items; } @@ -511,7 +544,7 @@ // Produces output that looks like: // BM_Compress 50 50 14115038 compress:27.3% // - // REQUIRES: a benchmark has exited its KeepRunning loop. + // REQUIRES: a benchmark has exited its benchmarking loop. void SetLabel(const char* label); void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) { @@ -532,7 +565,7 @@ int range_y() const { return range(1); } BENCHMARK_ALWAYS_INLINE - size_t iterations() const { return total_iterations_; } + size_t iterations() const { return (max_iterations - total_iterations_) + 1; } private: bool started_; @@ -570,6 +603,53 @@ BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State); }; +struct State::StateIterator { + struct BENCHMARK_UNUSED Value {}; + typedef std::forward_iterator_tag iterator_category; + typedef Value value_type; + typedef Value reference; + typedef Value pointer; + + private: + friend class State; + BENCHMARK_ALWAYS_INLINE + StateIterator() : cached_(0), parent_() {} + + BENCHMARK_ALWAYS_INLINE + explicit StateIterator(State* st) + : cached_(st->error_occurred_ ? 0 : st->max_iterations), parent_(st) {} + + public: + BENCHMARK_ALWAYS_INLINE + Value operator*() const { return Value(); } + + BENCHMARK_ALWAYS_INLINE + StateIterator& operator++() { + assert(cached_ > 0); + --cached_; + return *this; + } + + BENCHMARK_ALWAYS_INLINE + bool operator!=(StateIterator const&) const { + if (BENCHMARK_BUILTIN_EXPECT(cached_ != 0, true)) return true; + parent_->FinishKeepRunning(); + return false; + } + + private: + size_t cached_; + State* const parent_; +}; + +BENCHMARK_ALWAYS_INLINE inline State::StateIterator State::begin() { + return StateIterator(this); +} +BENCHMARK_ALWAYS_INLINE inline State::StateIterator State::end() { + StartKeepRunning(); + return StateIterator(); +} + namespace internal { typedef void(Function)(State&); @@ -698,6 +778,9 @@ // the asymptotic computational complexity will be shown on the output. Benchmark* Complexity(BigOFunc* complexity); + // Add this statistics to be computed over all the values of benchmark run + Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics); + // Support for running multiple copies of the same benchmark concurrently // in multiple threads. This may be useful when measuring the scaling // of some piece of code. 
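As context for reviewers, here is a minimal sketch of how the new `ComputeStatistics` hook is used; the `MyMax` helper and the benchmark body are illustrative, not part of this patch:

```c++
#include <algorithm>
#include <string>
#include <vector>

#include "benchmark/benchmark.h"

// Hypothetical user statistic: the maximum over all repetitions.
double MyMax(const std::vector<double>& v) {
  return *(std::max_element(v.begin(), v.end()));
}

static void BM_StringCopy(benchmark::State& state) {
  std::string x = "hello";
  for (auto _ : state) std::string copy(x);
}

// "max" is reported as BM_StringCopy/repeats:4_max, next to the default
// _mean, _median and _stddev aggregates.
BENCHMARK(BM_StringCopy)->Repetitions(4)->ComputeStatistics("max", MyMax);

BENCHMARK_MAIN()
```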
@@ -758,6 +841,7 @@ bool use_manual_time_; BigO complexity_; BigOFunc* complexity_lambda_; + std::vector statistics_; std::vector thread_counts_; Benchmark& operator=(Benchmark const&); @@ -905,7 +989,7 @@ #define BENCHMARK_RANGE2(n, l1, h1, l2, h2) \ BENCHMARK(n)->RangePair({{(l1), (h1)}, {(l2), (h2)}}) -#if __cplusplus >= 201103L +#ifdef BENCHMARK_HAS_CXX11 // Register a benchmark which invokes the function specified by `func` // with the additional arguments specified by `...`. @@ -925,7 +1009,7 @@ #func "/" #test_case_name, \ [](::benchmark::State& st) { func(st, __VA_ARGS__); }))) -#endif // __cplusplus >= 11 +#endif // BENCHMARK_HAS_CXX11 // This will register a benchmark for a templatized function. For example: // @@ -946,7 +1030,7 @@ new ::benchmark::internal::FunctionBenchmark(#n "<" #a "," #b ">", \ n))) -#if __cplusplus >= 201103L +#ifdef BENCHMARK_HAS_CXX11 #define BENCHMARK_TEMPLATE(n, ...) \ BENCHMARK_PRIVATE_DECLARE(n) = \ (::benchmark::internal::RegisterBenchmarkInternal( \ @@ -967,10 +1051,63 @@ virtual void BenchmarkCase(::benchmark::State&); \ }; +#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass() { \ + this->SetName(#BaseClass"<" #a ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; + +#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass() { \ + this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) \ + class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ + public: \ + BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ + this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&); \ + }; +#else +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) +#endif + #define BENCHMARK_DEFINE_F(BaseClass, Method) \ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ void BaseClass##_##Method##_Benchmark::BenchmarkCase +#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \ + BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) 
\ + BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase +#else +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) +#endif + #define BENCHMARK_REGISTER_F(BaseClass, Method) \ BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark) @@ -984,6 +1121,25 @@ BENCHMARK_REGISTER_F(BaseClass, Method); \ void BaseClass##_##Method##_Benchmark::BenchmarkCase +#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \ + BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \ + BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BaseClass##_##Method##_Benchmark::BenchmarkCase +#else +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) +#endif + // Helper macro to create a main routine in a test that runs the benchmarks #define BENCHMARK_MAIN() \ int main(int argc, char** argv) { \ @@ -1065,6 +1221,9 @@ BigOFunc* complexity_lambda; int complexity_n; + // what statistics to compute from the measurements + const std::vector* statistics; + // Inform print function whether the current run is a complexity report bool report_big_o; bool report_rms; Index: MicroBenchmarks/libs/benchmark-1.3.0/src/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/CMakeLists.txt +++ MicroBenchmarks/libs/benchmark-1.3.0/src/CMakeLists.txt @@ -54,25 +54,27 @@ configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) -# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) -install( - TARGETS benchmark - EXPORT ${targets_export_name} - ARCHIVE DESTINATION ${lib_install_dir} - LIBRARY DESTINATION ${lib_install_dir} - RUNTIME DESTINATION ${bin_install_dir} - INCLUDES DESTINATION ${include_install_dir}) - -install( - DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" - DESTINATION ${include_install_dir} - FILES_MATCHING PATTERN "*.*h") - -install( - FILES "${project_config}" "${version_config}" - DESTINATION "${config_install_dir}") - -install( - EXPORT "${targets_export_name}" - NAMESPACE "${namespace}" - DESTINATION "${config_install_dir}") +if (BENCHMARK_ENABLE_INSTALL) + # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) + install( + TARGETS benchmark + EXPORT ${targets_export_name} + ARCHIVE DESTINATION ${lib_install_dir} + LIBRARY DESTINATION ${lib_install_dir} + RUNTIME DESTINATION ${bin_install_dir} + INCLUDES DESTINATION ${include_install_dir}) + + install( + DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" + DESTINATION ${include_install_dir} + FILES_MATCHING PATTERN "*.*h") + + install( + FILES "${project_config}" "${version_config}" + DESTINATION "${config_install_dir}") + + install( + EXPORT "${targets_export_name}" + NAMESPACE "${namespace}" + DESTINATION "${config_install_dir}") +endif() Index: MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark.cc 
=================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark.cc @@ -37,11 +37,11 @@ #include "colorprint.h" #include "commandlineflags.h" #include "complexity.h" +#include "statistics.h" #include "counter.h" #include "log.h" #include "mutex.h" #include "re.h" -#include "stat.h" #include "string_util.h" #include "sysinfo.h" #include "timers.h" @@ -99,20 +99,15 @@ DEFINE_int32(v, 0, "The level of verbose logging to output"); namespace benchmark { -namespace internal { - -void UseCharPointer(char const volatile*) {} - -} // end namespace internal namespace { - static const size_t kMaxIterations = 1000000000; - } // end namespace namespace internal { +void UseCharPointer(char const volatile*) {} + class ThreadManager { public: ThreadManager(int num_threads) @@ -256,6 +251,7 @@ report.complexity_n = results.complexity_n; report.complexity = b.complexity; report.complexity_lambda = b.complexity_lambda; + report.statistics = b.statistics; report.counters = results.counters; internal::Finish(&report.counters, seconds, b.threads); } @@ -401,7 +397,7 @@ internal::ThreadManager* manager) : started_(false), finished_(false), - total_iterations_(0), + total_iterations_(max_iters + 1), range_(ranges), bytes_processed_(0), items_processed_(0), @@ -414,6 +410,7 @@ timer_(timer), manager_(manager) { CHECK(max_iterations != 0) << "At least one iteration must be run"; + CHECK(total_iterations_ != 0) << "max iterations wrapped around"; CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; } @@ -438,7 +435,7 @@ manager_->results.has_error_ = true; } } - total_iterations_ = max_iterations; + total_iterations_ = 1; if (timer_->running()) timer_->StopTimer(); } @@ -463,8 +460,8 @@ if (!error_occurred_) { PauseTiming(); } - // Total iterations now is one greater than max iterations. Fix this. - total_iterations_ = max_iterations; + // Total iterations has now wrapped around zero. Fix this. + total_iterations_ = 1; finished_ = true; manager_->StartStopBarrier(); } @@ -481,17 +478,21 @@ // Determine the width of the name field using a minimum width of 10. 
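// (Patch note: with user-defined statistics the aggregate suffix is no longer
// the fixed "_stddev"; the width now accounts for the longest registered
// statistic name plus the '_' separator, as computed below.)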
bool has_repetitions = FLAGS_benchmark_repetitions > 1; size_t name_field_width = 10; + size_t stat_field_width = 0; for (const Benchmark::Instance& benchmark : benchmarks) { name_field_width = std::max(name_field_width, benchmark.name.size()); has_repetitions |= benchmark.repetitions > 1; + + for(const auto& Stat : *benchmark.statistics) + stat_field_width = std::max(stat_field_width, Stat.name_.size()); } - if (has_repetitions) name_field_width += std::strlen("_stddev"); + if (has_repetitions) name_field_width += 1 + stat_field_width; // Print header here BenchmarkReporter::Context context; context.num_cpus = NumCPUs(); - context.mhz_per_cpu = CyclesPerSecond() / 1000000.0f; + context.mhz_per_cpu = CyclesPerSecond() / 1000000.0; context.cpu_scaling_enabled = CpuScalingEnabled(); context.name_field_width = name_field_width; Index: MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_api_internal.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_api_internal.h +++ MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_api_internal.h @@ -25,6 +25,7 @@ BigO complexity; BigOFunc* complexity_lambda; UserCounters counters; + const std::vector* statistics; bool last_benchmark_instance; int repetitions; double min_time; Index: MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_register.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_register.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/benchmark_register.cc @@ -37,10 +37,10 @@ #include "check.h" #include "commandlineflags.h" #include "complexity.h" +#include "statistics.h" #include "log.h" #include "mutex.h" #include "re.h" -#include "stat.h" #include "string_util.h" #include "sysinfo.h" #include "timers.h" @@ -159,6 +159,7 @@ instance.use_manual_time = family->use_manual_time_; instance.complexity = family->complexity_; instance.complexity_lambda = family->complexity_lambda_; + instance.statistics = &family->statistics_; instance.threads = num_threads; // Add arguments to instance name @@ -236,7 +237,11 @@ use_real_time_(false), use_manual_time_(false), complexity_(oNone), - complexity_lambda_(nullptr) {} + complexity_lambda_(nullptr) { + ComputeStatistics("mean", StatisticsMean); + ComputeStatistics("median", StatisticsMedian); + ComputeStatistics("stddev", StatisticsStdDev); +} Benchmark::~Benchmark() {} @@ -409,6 +414,12 @@ return this; } +Benchmark* Benchmark::ComputeStatistics(std::string name, + StatisticsFunc* statistics) { + statistics_.emplace_back(name, statistics); + return this; +} + Benchmark* Benchmark::Threads(int t) { CHECK_GT(t, 0); thread_counts_.push_back(t); Index: MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.h +++ MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.h @@ -25,12 +25,6 @@ namespace benchmark { -// Return a vector containing the mean and standard devation information for -// the specified list of reports. If 'reports' contains less than two -// non-errored runs an empty vector is returned -std::vector ComputeStats( - const std::vector& reports); - // Return a vector containing the bigO and RMS information for the specified // list of reports. If 'reports.size() < 2' an empty vector is returned. 
std::vector ComputeBigO( @@ -57,4 +51,5 @@ std::string GetBigOString(BigO complexity); } // end namespace benchmark + #endif // COMPLEXITY_H_ Index: MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/complexity.cc @@ -21,7 +21,6 @@ #include #include "check.h" #include "complexity.h" -#include "stat.h" namespace benchmark { @@ -150,109 +149,6 @@ return best_fit; } -std::vector ComputeStats( - const std::vector& reports) { - typedef BenchmarkReporter::Run Run; - std::vector results; - - auto error_count = - std::count_if(reports.begin(), reports.end(), - [](Run const& run) { return run.error_occurred; }); - - if (reports.size() - error_count < 2) { - // We don't report aggregated data if there was a single run. - return results; - } - // Accumulators. - Stat1_d real_accumulated_time_stat; - Stat1_d cpu_accumulated_time_stat; - Stat1_d bytes_per_second_stat; - Stat1_d items_per_second_stat; - // All repetitions should be run with the same number of iterations so we - // can take this information from the first benchmark. - int64_t const run_iterations = reports.front().iterations; - // create stats for user counters - struct CounterStat { - Counter c; - Stat1_d s; - }; - std::map< std::string, CounterStat > counter_stats; - for(Run const& r : reports) { - for(auto const& cnt : r.counters) { - auto it = counter_stats.find(cnt.first); - if(it == counter_stats.end()) { - counter_stats.insert({cnt.first, {cnt.second, Stat1_d{}}}); - } else { - CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); - } - } - } - - // Populate the accumulators. - for (Run const& run : reports) { - CHECK_EQ(reports[0].benchmark_name, run.benchmark_name); - CHECK_EQ(run_iterations, run.iterations); - if (run.error_occurred) continue; - real_accumulated_time_stat += - Stat1_d(run.real_accumulated_time / run.iterations); - cpu_accumulated_time_stat += - Stat1_d(run.cpu_accumulated_time / run.iterations); - items_per_second_stat += Stat1_d(run.items_per_second); - bytes_per_second_stat += Stat1_d(run.bytes_per_second); - // user counters - for(auto const& cnt : run.counters) { - auto it = counter_stats.find(cnt.first); - CHECK_NE(it, counter_stats.end()); - it->second.s += Stat1_d(cnt.second); - } - } - - // Get the data from the accumulator to BenchmarkReporter::Run's. 
- Run mean_data; - mean_data.benchmark_name = reports[0].benchmark_name + "_mean"; - mean_data.iterations = run_iterations; - mean_data.real_accumulated_time = - real_accumulated_time_stat.Mean() * run_iterations; - mean_data.cpu_accumulated_time = - cpu_accumulated_time_stat.Mean() * run_iterations; - mean_data.bytes_per_second = bytes_per_second_stat.Mean(); - mean_data.items_per_second = items_per_second_stat.Mean(); - mean_data.time_unit = reports[0].time_unit; - // user counters - for(auto const& kv : counter_stats) { - auto c = Counter(kv.second.s.Mean(), counter_stats[kv.first].c.flags); - mean_data.counters[kv.first] = c; - } - - // Only add label to mean/stddev if it is same for all runs - mean_data.report_label = reports[0].report_label; - for (std::size_t i = 1; i < reports.size(); i++) { - if (reports[i].report_label != reports[0].report_label) { - mean_data.report_label = ""; - break; - } - } - - Run stddev_data; - stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev"; - stddev_data.report_label = mean_data.report_label; - stddev_data.iterations = 0; - stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev(); - stddev_data.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev(); - stddev_data.bytes_per_second = bytes_per_second_stat.StdDev(); - stddev_data.items_per_second = items_per_second_stat.StdDev(); - stddev_data.time_unit = reports[0].time_unit; - // user counters - for(auto const& kv : counter_stats) { - auto c = Counter(kv.second.s.StdDev(), counter_stats[kv.first].c.flags); - stddev_data.counters[kv.first] = c; - } - - results.push_back(mean_data); - results.push_back(stddev_data); - return results; -} - std::vector ComputeBigO( const std::vector& reports) { typedef BenchmarkReporter::Run Run; Index: MicroBenchmarks/libs/benchmark-1.3.0/src/console_reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/console_reporter.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/console_reporter.cc @@ -148,7 +148,7 @@ } for (auto& c : result.counters) { - auto const& s = HumanReadableNumber(c.second.value); + auto const& s = HumanReadableNumber(c.second.value, 1000); if (output_options_ & OO_Tabular) { if (c.second.flags & Counter::kIsRate) { printer(Out, COLOR_DEFAULT, " %8s/s", s.c_str()); Index: MicroBenchmarks/libs/benchmark-1.3.0/src/json_reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/json_reporter.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/json_reporter.cc @@ -21,6 +21,8 @@ #include #include #include +#include // for setprecision +#include #include "string_util.h" #include "timers.h" @@ -48,7 +50,14 @@ } std::string FormatKV(std::string const& key, double value) { - return StringPrintF("\"%s\": %.2f", key.c_str(), value); + std::stringstream ss; + ss << '"' << key << "\": "; + + const auto max_digits10 = std::numeric_limits::max_digits10; + const auto max_fractional_digits10 = max_digits10 - 1; + + ss << std::scientific << std::setprecision(max_fractional_digits10) << value; + return ss.str(); } int64_t RoundDouble(double v) { return static_cast(v + 0.5); } @@ -125,18 +134,18 @@ if (!run.report_big_o && !run.report_rms) { out << indent << FormatKV("iterations", run.iterations) << ",\n"; out << indent - << FormatKV("real_time", RoundDouble(run.GetAdjustedRealTime())) + << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n"; out << indent - << FormatKV("cpu_time", 
RoundDouble(run.GetAdjustedCPUTime())); + << FormatKV("cpu_time", run.GetAdjustedCPUTime()); out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); } else if (run.report_big_o) { out << indent - << FormatKV("cpu_coefficient", RoundDouble(run.GetAdjustedCPUTime())) + << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n"; out << indent - << FormatKV("real_coefficient", RoundDouble(run.GetAdjustedRealTime())) + << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n"; out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); @@ -147,17 +156,17 @@ if (run.bytes_per_second > 0.0) { out << ",\n" << indent - << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second)); + << FormatKV("bytes_per_second", run.bytes_per_second); } if (run.items_per_second > 0.0) { out << ",\n" << indent - << FormatKV("items_per_second", RoundDouble(run.items_per_second)); + << FormatKV("items_per_second", run.items_per_second); } for(auto &c : run.counters) { out << ",\n" << indent - << FormatKV(c.first, RoundDouble(c.second)); + << FormatKV(c.first, c.second); } if (!run.report_label.empty()) { out << ",\n" << indent << FormatKV("label", run.report_label); Index: MicroBenchmarks/libs/benchmark-1.3.0/src/reporter.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/reporter.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/reporter.cc @@ -22,7 +22,6 @@ #include #include "check.h" -#include "stat.h" namespace benchmark { Index: MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.h =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.h @@ -0,0 +1,37 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// Copyright 2017 Roman Lebedev. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef STATISTICS_H_ +#define STATISTICS_H_ + +#include + +#include "benchmark/benchmark.h" + +namespace benchmark { + +// Return a vector containing the mean, median and standard devation information +// (and any user-specified info) for the specified list of reports. If 'reports' +// contains less than two non-errored runs an empty vector is returned +std::vector ComputeStats( + const std::vector& reports); + +double StatisticsMean(const std::vector& v); +double StatisticsMedian(const std::vector& v); +double StatisticsStdDev(const std::vector& v); + +} // end namespace benchmark + +#endif // STATISTICS_H_ Index: MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.3.0/src/statistics.cc @@ -0,0 +1,175 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// Copyright 2017 Roman Lebedev. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark/benchmark.h" + +#include +#include +#include +#include +#include +#include "check.h" +#include "statistics.h" + +namespace benchmark { + +auto StatisticsSum = [](const std::vector& v) { + return std::accumulate(v.begin(), v.end(), 0.0); +}; + +double StatisticsMean(const std::vector& v) { + if (v.size() == 0) return 0.0; + return StatisticsSum(v) * (1.0 / v.size()); +} + +double StatisticsMedian(const std::vector& v) { + if (v.size() < 3) return StatisticsMean(v); + std::vector partial; + // we need roundDown(count/2)+1 slots + partial.resize(1 + (v.size() / 2)); + std::partial_sort_copy(v.begin(), v.end(), partial.begin(), partial.end()); + // did we have odd number of samples? + // if yes, then the last element of partially-sorted vector is the median + // it no, then the average of the last two elements is the median + if(v.size() % 2 == 1) + return partial.back(); + return (partial[partial.size() - 2] + partial[partial.size() - 1]) / 2.0; +} + +// Return the sum of the squares of this sample set +auto SumSquares = [](const std::vector& v) { + return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); +}; + +auto Sqr = [](const double dat) { return dat * dat; }; +auto Sqrt = [](const double dat) { + // Avoid NaN due to imprecision in the calculations + if (dat < 0.0) return 0.0; + return std::sqrt(dat); +}; + +double StatisticsStdDev(const std::vector& v) { + const auto mean = StatisticsMean(v); + if (v.size() == 0) return mean; + + // Sample standard deviation is undefined for n = 1 + if (v.size() == 1) + return 0.0; + + const double avg_squares = SumSquares(v) * (1.0 / v.size()); + return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean))); +} + +std::vector ComputeStats( + const std::vector& reports) { + typedef BenchmarkReporter::Run Run; + std::vector results; + + auto error_count = + std::count_if(reports.begin(), reports.end(), + [](Run const& run) { return run.error_occurred; }); + + if (reports.size() - error_count < 2) { + // We don't report aggregated data if there was a single run. + return results; + } + + // Accumulators. + std::vector real_accumulated_time_stat; + std::vector cpu_accumulated_time_stat; + std::vector bytes_per_second_stat; + std::vector items_per_second_stat; + + real_accumulated_time_stat.reserve(reports.size()); + cpu_accumulated_time_stat.reserve(reports.size()); + bytes_per_second_stat.reserve(reports.size()); + items_per_second_stat.reserve(reports.size()); + + // All repetitions should be run with the same number of iterations so we + // can take this information from the first benchmark. 
+ int64_t const run_iterations = reports.front().iterations; + // create stats for user counters + struct CounterStat { + Counter c; + std::vector s; + }; + std::map< std::string, CounterStat > counter_stats; + for(Run const& r : reports) { + for(auto const& cnt : r.counters) { + auto it = counter_stats.find(cnt.first); + if(it == counter_stats.end()) { + counter_stats.insert({cnt.first, {cnt.second, std::vector{}}}); + it = counter_stats.find(cnt.first); + it->second.s.reserve(reports.size()); + } else { + CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags); + } + } + } + + // Populate the accumulators. + for (Run const& run : reports) { + CHECK_EQ(reports[0].benchmark_name, run.benchmark_name); + CHECK_EQ(run_iterations, run.iterations); + if (run.error_occurred) continue; + real_accumulated_time_stat.emplace_back(run.real_accumulated_time); + cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); + items_per_second_stat.emplace_back(run.items_per_second); + bytes_per_second_stat.emplace_back(run.bytes_per_second); + // user counters + for(auto const& cnt : run.counters) { + auto it = counter_stats.find(cnt.first); + CHECK_NE(it, counter_stats.end()); + it->second.s.emplace_back(cnt.second); + } + } + + // Only add label if it is same for all runs + std::string report_label = reports[0].report_label; + for (std::size_t i = 1; i < reports.size(); i++) { + if (reports[i].report_label != report_label) { + report_label = ""; + break; + } + } + + for(const auto& Stat : *reports[0].statistics) { + // Get the data from the accumulator to BenchmarkReporter::Run's. + Run data; + data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_; + data.report_label = report_label; + data.iterations = run_iterations; + + data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); + data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); + data.bytes_per_second = Stat.compute_(bytes_per_second_stat); + data.items_per_second = Stat.compute_(items_per_second_stat); + + data.time_unit = reports[0].time_unit; + + // user counters + for(auto const& kv : counter_stats) { + const auto uc_stat = Stat.compute_(kv.second.s); + auto c = Counter(uc_stat, counter_stats[kv.first].c.flags); + data.counters[kv.first] = c; + } + + results.push_back(data); + } + + return results; +} + +} // end namespace benchmark Index: MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.h =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.h +++ MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.h @@ -10,7 +10,7 @@ void AppendHumanReadable(int n, std::string* str); -std::string HumanReadableNumber(double n); +std::string HumanReadableNumber(double n, double one_k = 1024.0); std::string StringPrintF(const char* format, ...); Index: MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/src/string_util.cc @@ -27,8 +27,6 @@ static const int64_t kUnitsSize = arraysize(kBigSIUnits); -} // end anonymous namespace - void ToExponentAndMantissa(double val, double thresh, int precision, double one_k, std::string* mantissa, int64_t* exponent) { @@ -100,14 +98,16 @@ } std::string ToBinaryStringFullySpecified(double value, double threshold, - int precision) { + int precision, double one_k = 1024.0) { std::string mantissa; int64_t exponent; - 
ToExponentAndMantissa(value, threshold, precision, 1024.0, &mantissa, + ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, &exponent); return mantissa + ExponentToPrefix(exponent, false); } +} // end namespace + void AppendHumanReadable(int n, std::string* str) { std::stringstream ss; // Round down to the nearest SI prefix. @@ -115,11 +115,11 @@ *str += ss.str(); } -std::string HumanReadableNumber(double n) { +std::string HumanReadableNumber(double n, double one_k) { // 1.1 means that figures up to 1.1k should be shown with the next unit down; // this softens edge effects. // 1 means that we should show one decimal place of precision. - return ToBinaryStringFullySpecified(n, 1.1, 1); + return ToBinaryStringFullySpecified(n, 1.1, 1, one_k); } std::string StringPrintFImp(const char* msg, va_list args) { Index: MicroBenchmarks/libs/benchmark-1.3.0/test/CMakeLists.txt =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/CMakeLists.txt +++ MicroBenchmarks/libs/benchmark-1.3.0/test/CMakeLists.txt @@ -98,6 +98,9 @@ compile_output_test(reporter_output_test) add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01) +compile_output_test(templated_fixture_test) +add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01) + compile_output_test(user_counters_test) add_test(user_counters_test user_counters_test --benchmark_min_time=0.01) @@ -106,13 +109,20 @@ check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) if (BENCHMARK_HAS_CXX03_FLAG) - set(CXX03_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE "-std=c++11" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}") - string(REPLACE "-std=c++0x" "-std=c++03" CXX03_FLAGS "${CXX03_FLAGS}") - compile_benchmark_test(cxx03_test) set_target_properties(cxx03_test - PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}") + PROPERTIES + COMPILE_FLAGS "-std=c++03") + # libstdc++ provides different definitions within between dialects. When + # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation + # causing the test to fail to compile. To prevent this we explicitly disable + # the warning. 
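+  # (Patch note: the -Wno-odr link flag is applied only when both
+  # BENCHMARK_ENABLE_LTO is ON and the compiler actually accepts the flag,
+  # as checked below.)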
+ check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR) + if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR) + set_target_properties(cxx03_test + PROPERTIES + LINK_FLAGS "-Wno-odr") + endif() add_test(cxx03 cxx03_test --benchmark_min_time=0.01) endif() Index: MicroBenchmarks/libs/benchmark-1.3.0/test/basic_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/basic_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/basic_test.cc @@ -4,7 +4,7 @@ #define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192) void BM_empty(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } } @@ -12,7 +12,7 @@ BENCHMARK(BM_empty)->ThreadPerCpu(); void BM_spin_empty(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { for (int x = 0; x < state.range(0); ++x) { benchmark::DoNotOptimize(x); } @@ -25,7 +25,7 @@ for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } @@ -35,7 +35,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu(); void BM_spin_pause_during(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { state.PauseTiming(); for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); @@ -50,7 +50,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu(); void BM_pause_during(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { state.PauseTiming(); state.ResumeTiming(); } @@ -61,7 +61,7 @@ BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu(); void BM_spin_pause_after(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } @@ -77,7 +77,7 @@ for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) { benchmark::DoNotOptimize(i); } @@ -90,10 +90,29 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu(); void BM_empty_stop_start(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_empty_stop_start); BENCHMARK(BM_empty_stop_start)->ThreadPerCpu(); + +void BM_KeepRunning(benchmark::State& state) { + size_t iter_count = 0; + while (state.KeepRunning()) { + ++iter_count; + } + assert(iter_count == state.max_iterations); +} +BENCHMARK(BM_KeepRunning); + +void BM_RangedFor(benchmark::State& state) { + size_t iter_count = 0; + for (auto _ : state) { + ++iter_count; + } + assert(iter_count == state.max_iterations); +} +BENCHMARK(BM_RangedFor); + BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.3.0/test/benchmark_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/benchmark_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/benchmark_test.cc @@ -42,7 +42,7 @@ std::set ConstructRandomSet(int size) { std::set s; - for (int i = 0; i < size; ++i) s.insert(i); + for (int i = 0; i < size; ++i) s.insert(s.end(), i); return s; } @@ -53,7 +53,7 @@ static void BM_Factorial(benchmark::State& state) { int fac_42 = 0; - while (state.KeepRunning()) fac_42 = Factorial(8); + for (auto _ : state) fac_42 = Factorial(8); // Prevent compiler 
optimizations std::stringstream ss; ss << fac_42; @@ -64,7 +64,7 @@ static void BM_CalculatePiRange(benchmark::State& state) { double pi = 0.0; - while (state.KeepRunning()) pi = CalculatePi(state.range(0)); + for (auto _ : state) pi = CalculatePi(state.range(0)); std::stringstream ss; ss << pi; state.SetLabel(ss.str()); @@ -73,7 +73,7 @@ static void BM_CalculatePi(benchmark::State& state) { static const int depth = 1024; - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(CalculatePi(depth)); } } @@ -82,22 +82,26 @@ BENCHMARK(BM_CalculatePi)->ThreadPerCpu(); static void BM_SetInsert(benchmark::State& state) { - while (state.KeepRunning()) { + std::set data; + for (auto _ : state) { state.PauseTiming(); - std::set data = ConstructRandomSet(state.range(0)); + data = ConstructRandomSet(state.range(0)); state.ResumeTiming(); for (int j = 0; j < state.range(1); ++j) data.insert(rand()); } state.SetItemsProcessed(state.iterations() * state.range(1)); state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); } -BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {1, 10}}); + +// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower, +// non-timed part of each iteration will make the benchmark take forever. +BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}}); template static void BM_Sequential(benchmark::State& state) { ValueType v = 42; - while (state.KeepRunning()) { + for (auto _ : state) { Container c; for (int i = state.range(0); --i;) c.push_back(v); } @@ -109,14 +113,14 @@ ->Range(1 << 0, 1 << 10); BENCHMARK_TEMPLATE(BM_Sequential, std::list)->Range(1 << 0, 1 << 10); // Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond. -#if __cplusplus >= 201103L +#ifdef BENCHMARK_HAS_CXX11 BENCHMARK_TEMPLATE(BM_Sequential, std::vector, int)->Arg(512); #endif static void BM_StringCompare(benchmark::State& state) { std::string s1(state.range(0), '-'); std::string s2(state.range(0), '-'); - while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2)); + for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2)); } BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); @@ -126,7 +130,7 @@ test_vector = new std::vector(); } int i = 0; - while (state.KeepRunning()) { + for (auto _ : state) { std::lock_guard l(test_vector_mu); if (i % 2 == 0) test_vector->push_back(i); @@ -142,7 +146,7 @@ static void BM_LongTest(benchmark::State& state) { double tracker = 0.0; - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < state.range(0); ++i) benchmark::DoNotOptimize(tracker += i); } @@ -159,7 +163,7 @@ test_vector = new std::vector(size); } - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = from; i < to; i++) { // No need to lock test_vector_mu as ranges // do not overlap between threads. @@ -179,7 +183,7 @@ std::chrono::duration sleep_duration{ static_cast(microseconds)}; - while (state.KeepRunning()) { + for (auto _ : state) { auto start = std::chrono::high_resolution_clock::now(); // Simulate some useful workload with a sleep std::this_thread::sleep_for( @@ -197,11 +201,11 @@ BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime(); BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime(); -#if __cplusplus >= 201103L +#ifdef BENCHMARK_HAS_CXX11 template void BM_with_args(benchmark::State& state, Args&&...) 
{ - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44); @@ -213,7 +217,7 @@ } BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); -#endif // __cplusplus >= 201103L +#endif // BENCHMARK_HAS_CXX11 static void BM_DenseThreadRanges(benchmark::State& st) { switch (st.range(0)) { Index: MicroBenchmarks/libs/benchmark-1.3.0/test/complexity_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/complexity_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/complexity_test.cc @@ -25,8 +25,8 @@ {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. {"^%rms_name %rms %rms[ ]*$", MR_Next}}); AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, - {"\"cpu_coefficient\": [0-9]+,$", MR_Next}, - {"\"real_coefficient\": [0-9]{1,5},$", MR_Next}, + {"\"cpu_coefficient\": %float,$", MR_Next}, + {"\"real_coefficient\": %float,$", MR_Next}, {"\"big_o\": \"%bigo\",$", MR_Next}, {"\"time_unit\": \"ns\"$", MR_Next}, {"}", MR_Next}, @@ -46,7 +46,7 @@ // ========================================================================= // void BM_Complexity_O1(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < 1024; ++i) { benchmark::DoNotOptimize(&i); } @@ -94,7 +94,7 @@ auto v = ConstructRandomVector(state.range(0)); const int item_not_in_vector = state.range(0) * 2; // Test worst case scenario (item not in vector) - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); } state.SetComplexityN(state.range(0)); @@ -129,7 +129,7 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range(0)); - while (state.KeepRunning()) { + for (auto _ : state) { std::sort(v.begin(), v.end()); } state.SetComplexityN(state.range(0)); Index: MicroBenchmarks/libs/benchmark-1.3.0/test/cxx03_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/cxx03_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/cxx03_test.cc @@ -8,6 +8,10 @@ #error C++11 or greater detected. Should be C++03. #endif +#ifdef BENCHMARK_HAS_CXX11 +#error C++11 or greater detected by the library. BENCHMARK_HAS_CXX11 is defined. 
+#endif + void BM_empty(benchmark::State& state) { while (state.KeepRunning()) { volatile std::size_t x = state.iterations(); @@ -39,6 +43,17 @@ BENCHMARK_TEMPLATE(BM_template1, long); BENCHMARK_TEMPLATE1(BM_template1, int); +template +struct BM_Fixture : public ::benchmark::Fixture { +}; + +BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { + BM_empty(state); +} +BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { + BM_empty(state); +} + void BM_counters(benchmark::State& state) { BM_empty(state); state.counters["Foo"] = 2; Index: MicroBenchmarks/libs/benchmark-1.3.0/test/diagnostics_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/diagnostics_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/diagnostics_test.cc @@ -47,7 +47,7 @@ if (called_once == false) try_invalid_pause_resume(state); - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } @@ -57,6 +57,22 @@ } BENCHMARK(BM_diagnostic_test); + +void BM_diagnostic_test_keep_running(benchmark::State& state) { + static bool called_once = false; + + if (called_once == false) try_invalid_pause_resume(state); + + while(state.KeepRunning()) { + benchmark::DoNotOptimize(state.iterations()); + } + + if (called_once == false) try_invalid_pause_resume(state); + + called_once = true; +} +BENCHMARK(BM_diagnostic_test_keep_running); + int main(int argc, char* argv[]) { benchmark::internal::GetAbortHandler() = &TestHandler; benchmark::Initialize(&argc, argv); Index: MicroBenchmarks/libs/benchmark-1.3.0/test/filter_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/filter_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/filter_test.cc @@ -36,31 +36,31 @@ } // end namespace static void NoPrefix(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(NoPrefix); static void BM_Foo(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_Foo); static void BM_Bar(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_Bar); static void BM_FooBar(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_FooBar); static void BM_FooBa(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_FooBa); Index: MicroBenchmarks/libs/benchmark-1.3.0/test/fixture_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/fixture_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/fixture_test.cc @@ -28,7 +28,7 @@ BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { assert(data.get() != nullptr); assert(*data == 42); - while (st.KeepRunning()) { + for (auto _ : st) { } } @@ -37,7 +37,7 @@ assert(data.get() != nullptr); assert(*data == 42); } - while (st.KeepRunning()) { + for (auto _ : st) { assert(data.get() != nullptr); assert(*data == 42); } Index: MicroBenchmarks/libs/benchmark-1.3.0/test/map_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/map_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/map_test.cc @@ -18,9 +18,10 @@ // Basic version. 
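// (Patch note: the map is hoisted out of the benchmark loop and rebuilt under
// PauseTiming()/ResumeTiming(), so the timed region covers only the lookups.)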
static void BM_MapLookup(benchmark::State& state) { const int size = state.range(0); - while (state.KeepRunning()) { + std::map m; + for (auto _ : state) { state.PauseTiming(); - std::map m = ConstructRandomMap(size); + m = ConstructRandomMap(size); state.ResumeTiming(); for (int i = 0; i < size; ++i) { benchmark::DoNotOptimize(m.find(rand() % size)); @@ -44,7 +45,7 @@ BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) { const int size = state.range(0); - while (state.KeepRunning()) { + for (auto _ : state) { for (int i = 0; i < size; ++i) { benchmark::DoNotOptimize(m.find(rand() % size)); } Index: MicroBenchmarks/libs/benchmark-1.3.0/test/multiple_ranges_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/multiple_ranges_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/multiple_ranges_test.cc @@ -43,7 +43,7 @@ }; BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { int product = state.range(0) * state.range(1) * state.range(2); for (int x = 0; x < product; x++) { benchmark::DoNotOptimize(x); @@ -60,13 +60,13 @@ // Test that the 'range()' without an argument is the same as 'range(0)'. assert(state.range() == state.range(0)); assert(state.range() != state.range(1)); - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}}); static void BM_MultipleRanges(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } } BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}}); Index: MicroBenchmarks/libs/benchmark-1.3.0/test/options_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/options_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/options_test.cc @@ -8,13 +8,13 @@ #include void BM_basic(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } void BM_basic_slow(benchmark::State& state) { std::chrono::milliseconds sleep_duration(state.range(0)); - while (state.KeepRunning()) { + for (auto _ : state) { std::this_thread::sleep_for( std::chrono::duration_cast(sleep_duration)); } @@ -44,7 +44,7 @@ BENCHMARK(BM_basic)->Apply(CustomArgs); -void BM_explicit_iteration_count(benchmark::State& st) { +void BM_explicit_iteration_count(benchmark::State& state) { // Test that benchmarks specified with an explicit iteration count are // only run once. static bool invoked_before = false; @@ -52,12 +52,12 @@ invoked_before = true; // Test that the requested iteration count is respected. 
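// (Patch note: the benchmark is registered with ->Iterations(42) further
// down, so exactly 42 iterations are expected here.)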
- assert(st.max_iterations == 42); + assert(state.max_iterations == 42); size_t actual_iterations = 0; - while (st.KeepRunning()) + for (auto _ : state) ++actual_iterations; - assert(st.iterations() == st.max_iterations); - assert(st.iterations() == 42); + assert(state.iterations() == state.max_iterations); + assert(state.iterations() == 42); } BENCHMARK(BM_explicit_iteration_count)->Iterations(42); Index: MicroBenchmarks/libs/benchmark-1.3.0/test/register_benchmark_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/register_benchmark_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/register_benchmark_test.cc @@ -61,7 +61,7 @@ // Test RegisterBenchmark with no additional arguments //----------------------------------------------------------------------------// void BM_function(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_function); @@ -77,7 +77,7 @@ #ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK void BM_extra_args(benchmark::State& st, const char* label) { - while (st.KeepRunning()) { + for (auto _ : st) { } st.SetLabel(label); } @@ -99,7 +99,7 @@ struct CustomFixture { void operator()(benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } } }; @@ -116,7 +116,7 @@ { const char* x = "42"; auto capturing_lam = [=](benchmark::State& st) { - while (st.KeepRunning()) { + for (auto _ : st) { } st.SetLabel(x); }; Index: MicroBenchmarks/libs/benchmark-1.3.0/test/reporter_output_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/reporter_output_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/reporter_output_test.cc @@ -20,7 +20,7 @@ // ========================================================================= // void BM_basic(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_basic); @@ -28,8 +28,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\"$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); @@ -39,20 +39,20 @@ // ========================================================================= // void BM_bytes_per_second(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetBytesProcessed(1); } BENCHMARK(BM_bytes_per_second); ADD_CASES(TC_ConsoleOut, - {{"^BM_bytes_per_second %console_report +%floatB/s$"}}); + {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bytes_per_second\": %int$", MR_Next}, + {"\"bytes_per_second\": %float$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); @@ -61,20 +61,20 @@ // ========================================================================= // void BM_items_per_second(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } 
state.SetItemsProcessed(1); } BENCHMARK(BM_items_per_second); ADD_CASES(TC_ConsoleOut, - {{"^BM_items_per_second %console_report +%float items/s$"}}); + {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"items_per_second\": %int$", MR_Next}, + {"\"items_per_second\": %float$", MR_Next}, {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}}); @@ -83,7 +83,7 @@ // ========================================================================= // void BM_label(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetLabel("some label"); } @@ -92,8 +92,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"label\": \"some label\"$", MR_Next}, {"}", MR_Next}}); @@ -106,7 +106,7 @@ void BM_error(benchmark::State& state) { state.SkipWithError("message"); - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_error); @@ -123,7 +123,7 @@ // ========================================================================= // void BM_no_arg_name(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_no_arg_name)->Arg(3); @@ -136,7 +136,7 @@ // ========================================================================= // void BM_arg_name(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); @@ -149,7 +149,7 @@ // ========================================================================= // void BM_arg_names(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); @@ -163,7 +163,7 @@ // ========================================================================= // void BM_Complexity_O1(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.SetComplexityN(state.range(0)); } @@ -179,30 +179,74 @@ // Test that non-aggregate data is printed by default void BM_Repeat(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } +// need two repetitions min to be able to output any aggregate output +BENCHMARK(BM_Repeat)->Repetitions(2); +ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2_mean %console_report$"}, + {"^BM_Repeat/repeats:2_median %console_report$"}, + {"^BM_Repeat/repeats:2_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, + {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"}, + 
{"^\"BM_Repeat/repeats:2_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); +// but for two repetitions, mean and median is the same, so let's repeat.. BENCHMARK(BM_Repeat)->Repetitions(3); ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3 %console_report$"}, {"^BM_Repeat/repeats:3_mean %console_report$"}, + {"^BM_Repeat/repeats:3_median %console_report$"}, {"^BM_Repeat/repeats:3_stddev %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:3_median\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); +// median differs between even/odd number of repetitions, so just to be sure +BENCHMARK(BM_Repeat)->Repetitions(4); +ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4_mean %console_report$"}, + {"^BM_Repeat/repeats:4_median %console_report$"}, + {"^BM_Repeat/repeats:4_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, + {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_median\",%csv_report$"}, + {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}}); // Test that a non-repeated test still prints non-aggregate results even when // only-aggregate reports have been requested void BM_RepeatOnce(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); @@ -212,23 +256,26 @@ // Test that non-aggregate data is not reported void BM_SummaryRepeat(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); ADD_CASES(TC_ConsoleOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^BM_SummaryRepeat/repeats:3_mean %console_report$"}, + {"^BM_SummaryRepeat/repeats:3_median %console_report$"}, {"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}}); ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, + {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"}, 
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}}); void BM_RepeatTimeUnit(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } } BENCHMARK(BM_RepeatTimeUnit) @@ -238,17 +285,59 @@ ADD_CASES(TC_ConsoleOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"}, + {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"}, {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}}); ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, {"\"time_unit\": \"us\",?$"}, + {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, + {"\"time_unit\": \"us\",?$"}, {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, {"\"time_unit\": \"us\",?$"}}); ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"}, + {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"}, {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}}); +// ========================================================================= // +// -------------------- Testing user-provided statistics ------------------- // +// ========================================================================= // + +const auto UserStatistics = [](const std::vector& v) { + return v.back(); +}; +void BM_UserStats(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_UserStats) + ->Repetitions(3) + ->ComputeStatistics("", UserStatistics); +// check that user-provided stats is calculated, and is after the default-ones +// empty string as name is intentional, it would sort before anything else +ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3 %console_report$"}, + {"^BM_UserStats/repeats:3_mean %console_report$"}, + {"^BM_UserStats/repeats:3_median %console_report$"}, + {"^BM_UserStats/repeats:3_stddev %console_report$"}, + {"^BM_UserStats/repeats:3_ %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_mean\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_median\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"}, + {"\"name\": \"BM_UserStats/repeats:3_\",$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_median\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"}, + {"^\"BM_UserStats/repeats:3_\",%csv_report$"}}); + // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // // ========================================================================= // Index: MicroBenchmarks/libs/benchmark-1.3.0/test/skip_with_error_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/skip_with_error_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/skip_with_error_test.cc @@ -70,6 +70,15 @@ BENCHMARK(BM_error_before_running); ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); +void BM_error_before_running_range_for(benchmark::State& state) { + 
state.SkipWithError("error message"); + for (auto _ : state) { + assert(false); + } +} +BENCHMARK(BM_error_before_running_range_for); +ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); + void BM_error_during_running(benchmark::State& state) { int first_iter = true; while (state.KeepRunning()) { @@ -93,8 +102,31 @@ {"/2/threads:4", false, ""}, {"/2/threads:8", false, ""}}); +void BM_error_during_running_ranged_for(benchmark::State& state) { + assert(state.max_iterations > 3 && "test requires at least a few iterations"); + int first_iter = true; + // NOTE: Users should not write the for loop explicitly. + for (auto It = state.begin(), End = state.end(); It != End; ++It) { + if (state.range(0) == 1) { + assert(first_iter); + first_iter = false; + state.SkipWithError("error message"); + // Test the unfortunate but documented behavior that the ranged-for loop + // doesn't automatically terminate when SkipWithError is set. + assert(++It != End); + break; // Required behavior + } + } +} +BENCHMARK(BM_error_during_running_ranged_for)->Arg(1)->Arg(2)->Iterations(5); +ADD_CASES("BM_error_during_running_ranged_for", + {{"/1/iterations:5", true, "error message"}, + {"/2/iterations:5", false, ""}}); + + + void BM_error_after_running(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } if (state.thread_index <= (state.threads / 2)) Index: MicroBenchmarks/libs/benchmark-1.3.0/test/templated_fixture_test.cc =================================================================== --- /dev/null +++ MicroBenchmarks/libs/benchmark-1.3.0/test/templated_fixture_test.cc @@ -0,0 +1,28 @@ + +#include "benchmark/benchmark.h" + +#include +#include + +template +class MyFixture : public ::benchmark::Fixture { +public: + MyFixture() : data(0) {} + + T data; +}; + +BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) { + for (auto _ : st) { + data += 1; + } +} + +BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) { + for (auto _ : st) { + data += 1.0; + } +} +BENCHMARK_REGISTER_F(MyFixture, Bar); + +BENCHMARK_MAIN() Index: MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_tabular_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_tabular_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_tabular_test.cc @@ -54,7 +54,7 @@ // ========================================================================= // void BM_Counters_Tabular(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters.insert({ @@ -69,8 +69,8 @@ BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"Bar\": %float,$", MR_Next}, {"\"Bat\": %float,$", MR_Next}, @@ -98,7 +98,7 @@ // ========================================================================= // void BM_CounterRates_Tabular(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters.insert({ @@ -113,8 +113,8 @@ BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": 
\"BM_CounterRates_Tabular/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"Bar\": %float,$", MR_Next}, {"\"Bat\": %float,$", MR_Next}, @@ -145,7 +145,7 @@ // set only some of the counters void BM_CounterSet0_Tabular(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters.insert({ @@ -157,8 +157,8 @@ BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"Bar\": %float,$", MR_Next}, {"\"Baz\": %float,$", MR_Next}, @@ -177,7 +177,7 @@ // again. void BM_CounterSet1_Tabular(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters.insert({ @@ -189,8 +189,8 @@ BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"Bar\": %float,$", MR_Next}, {"\"Baz\": %float,$", MR_Next}, @@ -213,7 +213,7 @@ // set only some of the counters, different set now. void BM_CounterSet2_Tabular(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters.insert({ @@ -225,8 +225,8 @@ BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"Bat\": %float,$", MR_Next}, {"\"Baz\": %float,$", MR_Next}, Index: MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_test.cc =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_test.cc +++ MicroBenchmarks/libs/benchmark-1.3.0/test/user_counters_test.cc @@ -19,7 +19,7 @@ // ========================================================================= // void BM_Counters_Simple(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.counters["foo"] = 1; state.counters["bar"] = 2 * (double)state.iterations(); @@ -28,8 +28,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, @@ -51,7 +51,7 @@ namespace { int num_calls1 = 0; } void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } 
state.counters["foo"] = 1; state.counters["bar"] = ++num_calls1; @@ -64,11 +64,11 @@ "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bytes_per_second\": %int,$", MR_Next}, - {"\"items_per_second\": %int,$", MR_Next}, + {"\"bytes_per_second\": %float,$", MR_Next}, + {"\"items_per_second\": %float,$", MR_Next}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, {"}", MR_Next}}); @@ -92,7 +92,7 @@ // ========================================================================= // void BM_Counters_Rate(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate}; @@ -102,8 +102,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, @@ -124,7 +124,7 @@ // ========================================================================= // void BM_Counters_Threads(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } state.counters["foo"] = 1; state.counters["bar"] = 2; @@ -133,8 +133,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, @@ -153,7 +153,7 @@ // ========================================================================= // void BM_Counters_AvgThreads(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads}; @@ -163,8 +163,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, @@ -184,7 +184,7 @@ // ========================================================================= // void BM_Counters_AvgThreadsRate(benchmark::State& state) { - while (state.KeepRunning()) { + for (auto _ : state) { } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; @@ -194,8 +194,8 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": 
\"BM_Counters_AvgThreadsRate/threads:%int\",$"}, {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %int,$", MR_Next}, - {"\"cpu_time\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, {"\"time_unit\": \"ns\",$", MR_Next}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, Index: MicroBenchmarks/libs/benchmark-1.3.0/tools/compare_bench.py =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/tools/compare_bench.py +++ MicroBenchmarks/libs/benchmark-1.3.0/tools/compare_bench.py @@ -39,21 +39,20 @@ parser.add_argument( 'test2', metavar='test2', type=str, nargs=1, help='A benchmark executable or JSON output file') - # FIXME this is a dummy argument which will never actually match - # any --benchmark flags but it helps generate a better usage message parser.add_argument( - 'benchmark_options', metavar='benchmark_option', nargs='*', + 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, help='Arguments to pass when running benchmark executables' ) args, unknown_args = parser.parse_known_args() # Parse the command line flags test1 = args.test1[0] test2 = args.test2[0] - if args.benchmark_options: + if unknown_args: + # should never happen print("Unrecognized positional argument arguments: '%s'" - % args.benchmark_options) + % unknown_args) exit(1) - benchmark_options = unknown_args + benchmark_options = args.benchmark_options check_inputs(test1, test2, benchmark_options) # Run the benchmarks and report the results json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options) Index: MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run1.json =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run1.json +++ MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run1.json @@ -28,6 +28,20 @@ "cpu_time": 50, "time_unit": "ns" }, + { + "name": "BM_1PercentFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_1PercentSlower", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, { "name": "BM_10PercentFaster", "iterations": 1000, @@ -55,6 +69,34 @@ "real_time": 10000, "cpu_time": 10000, "time_unit": "ns" + }, + { + "name": "BM_10PercentCPUToTime", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_ThirdFaster", + "iterations": 1000, + "real_time": 100, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_BadTimeUnit", + "iterations": 1000, + "real_time": 0.4, + "cpu_time": 0.5, + "time_unit": "s" + }, + { + "name": "BM_DifferentTimeUnit", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" } ] -} \ No newline at end of file +} Index: MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run2.json =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run2.json +++ MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/Inputs/test1_run2.json @@ -28,6 +28,20 @@ "cpu_time": 100, "time_unit": "ns" }, + { + "name": "BM_1PercentFaster", + "iterations": 1000, + "real_time": 98.9999999, + "cpu_time": 98.9999999, + "time_unit": "ns" + }, + { + "name": "BM_1PercentSlower", + "iterations": 1000, + "real_time": 100.9999999, + "cpu_time": 100.9999999, + "time_unit": "ns" + }, { "name": "BM_10PercentFaster", 
"iterations": 1000, @@ -45,8 +59,8 @@ { "name": "BM_100xSlower", "iterations": 1000, - "real_time": 10000, - "cpu_time": 10000, + "real_time": 1.0000e+04, + "cpu_time": 1.0000e+04, "time_unit": "ns" }, { @@ -55,6 +69,34 @@ "real_time": 100, "cpu_time": 100, "time_unit": "ns" + }, + { + "name": "BM_10PercentCPUToTime", + "iterations": 1000, + "real_time": 110, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_ThirdFaster", + "iterations": 1000, + "real_time": 66.665, + "cpu_time": 66.664, + "time_unit": "ns" + }, + { + "name": "BM_BadTimeUnit", + "iterations": 1000, + "real_time": 0.04, + "cpu_time": 0.6, + "time_unit": "s" + }, + { + "name": "BM_DifferentTimeUnit", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "ns" } ] -} \ No newline at end of file +} Index: MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py =================================================================== --- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py +++ MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py @@ -71,13 +71,13 @@ Calculate and report the difference between each test of two benchmarks runs specified as 'json1' and 'json2'. """ - first_col_width = find_longest_name(json1['benchmarks']) + 5 + first_col_width = find_longest_name(json1['benchmarks']) def find_test(name): for b in json2['benchmarks']: if b['name'] == name: return b return None - first_line = "{:<{}s} Time CPU Old New".format( + first_line = "{:<{}s} Time CPU Time Old Time New CPU Old CPU New".format( 'Benchmark', first_col_width) output_strs = [first_line, '-' * len(first_line)] @@ -87,6 +87,9 @@ if not other_bench: continue + if bn['time_unit'] != other_bench['time_unit']: + continue + def get_color(res): if res > 0.05: return BC_FAIL @@ -94,12 +97,13 @@ return BC_WHITE else: return BC_CYAN - fmt_str = "{}{:<{}s}{endc}{}{:+9.2f}{endc}{}{:+14.2f}{endc}{:14d}{:14d}" + fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" tres = calculate_change(bn['real_time'], other_bench['real_time']) cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) output_strs += [color_format(use_color, fmt_str, BC_HEADER, bn['name'], first_col_width, get_color(tres), tres, get_color(cpures), cpures, + bn['real_time'], other_bench['real_time'], bn['cpu_time'], other_bench['cpu_time'], endc=BC_ENDC)] return output_strs @@ -123,22 +127,27 @@ def test_basic(self): expect_lines = [ - ['BM_SameTimes', '+0.00', '+0.00', '10', '10'], - ['BM_2xFaster', '-0.50', '-0.50', '50', '25'], - ['BM_2xSlower', '+1.00', '+1.00', '50', '100'], - ['BM_10PercentFaster', '-0.10', '-0.10', '100', '90'], - ['BM_10PercentSlower', '+0.10', '+0.10', '100', '110'], - ['BM_100xSlower', '+99.00', '+99.00', '100', '10000'], - ['BM_100xFaster', '-0.99', '-0.99', '10000', '100'], + ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], + ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], + ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'], + ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'], + ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], + ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], + ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], + ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], + ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], + ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', 
Index: MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py
===================================================================
--- MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py
+++ MicroBenchmarks/libs/benchmark-1.3.0/tools/gbench/report.py
@@ -71,13 +71,13 @@
     Calculate and report the difference between each test of two benchmarks
     runs specified as 'json1' and 'json2'.
     """
-    first_col_width = find_longest_name(json1['benchmarks']) + 5
+    first_col_width = find_longest_name(json1['benchmarks'])
     def find_test(name):
         for b in json2['benchmarks']:
             if b['name'] == name:
                 return b
         return None
-    first_line = "{:<{}s}           Time          CPU           Old          New".format(
+    first_line = "{:<{}s}           Time          CPU      Time Old      Time New       CPU Old       CPU New".format(
         'Benchmark', first_col_width)
     output_strs = [first_line, '-' * len(first_line)]
@@ -87,6 +87,9 @@
         if not other_bench:
             continue

+        if bn['time_unit'] != other_bench['time_unit']:
+            continue
+
         def get_color(res):
             if res > 0.05:
                 return BC_FAIL
@@ -94,12 +97,13 @@
                 return BC_WHITE
             else:
                 return BC_CYAN
-        fmt_str = "{}{:<{}s}{endc}{}{:+9.2f}{endc}{}{:+14.2f}{endc}{:14d}{:14d}"
+        fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
         tres = calculate_change(bn['real_time'], other_bench['real_time'])
         cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
         output_strs += [color_format(use_color, fmt_str,
             BC_HEADER, bn['name'], first_col_width,
             get_color(tres), tres, get_color(cpures), cpures,
+            bn['real_time'], other_bench['real_time'],
             bn['cpu_time'], other_bench['cpu_time'],
             endc=BC_ENDC)]
     return output_strs
@@ -123,22 +127,27 @@
     def test_basic(self):
         expect_lines = [
-            ['BM_SameTimes', '+0.00', '+0.00', '10', '10'],
-            ['BM_2xFaster', '-0.50', '-0.50', '50', '25'],
-            ['BM_2xSlower', '+1.00', '+1.00', '50', '100'],
-            ['BM_10PercentFaster', '-0.10', '-0.10', '100', '90'],
-            ['BM_10PercentSlower', '+0.10', '+0.10', '100', '110'],
-            ['BM_100xSlower', '+99.00', '+99.00', '100', '10000'],
-            ['BM_100xFaster', '-0.99', '-0.99', '10000', '100'],
+            ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
+            ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
+            ['BM_2xSlower', '+1.0000', '+1.0000', '50', '100', '50', '100'],
+            ['BM_1PercentFaster', '-0.0100', '-0.0100', '100', '99', '100', '99'],
+            ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'],
+            ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'],
+            ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'],
+            ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'],
+            ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'],
+            ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'],
+            ['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
+            ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
         ]
         json1, json2 = self.load_results()
         output_lines_with_header = generate_difference_report(json1, json2, use_color=False)
         output_lines = output_lines_with_header[2:]
         print("\n".join(output_lines_with_header))
         self.assertEqual(len(output_lines), len(expect_lines))
-        for i in xrange(0, len(output_lines)):
+        for i in range(0, len(output_lines)):
             parts = [x for x in output_lines[i].split(' ') if x]
-            self.assertEqual(len(parts), 5)
+            self.assertEqual(len(parts), 7)
             self.assertEqual(parts, expect_lines[i])
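
Each comparison row now carries seven fields: the name, the two relative changes printed to four decimal places, and the old/new absolute values for both wall time and CPU time, so a row reads schematically as BM_ThirdFaster -0.3333 -0.3334 100 67 100 67 (spacing illustrative). The new time_unit guard skips pairs such as BM_DifferentTimeUnit rather than comparing nanoseconds against seconds, and the xrange-to-range change lets the unit test run under Python 3 as well as Python 2.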